/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007-2008,2010
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
 * James Healy and David Hayes, made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
60 #include <sys/param.h>
62 #include <sys/kernel.h>
64 #include <sys/hhook.h>
66 #include <sys/malloc.h>
68 #include <sys/proc.h> /* for proc0 declaration */
69 #include <sys/protosw.h>
70 #include <sys/qmath.h>
72 #include <sys/signalvar.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/sysctl.h>
76 #include <sys/syslog.h>
77 #include <sys/systm.h>
78 #include <sys/stats.h>
80 #include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */
85 #include <net/if_var.h>
86 #include <net/route.h>
89 #define TCPSTATES /* for logging */
91 #include <netinet/in.h>
92 #include <netinet/in_kdtrace.h>
93 #include <netinet/in_pcb.h>
94 #include <netinet/in_systm.h>
95 #include <netinet/ip.h>
96 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
97 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
98 #include <netinet/ip_var.h>
99 #include <netinet/ip_options.h>
100 #include <netinet/ip6.h>
101 #include <netinet/icmp6.h>
102 #include <netinet6/in6_pcb.h>
103 #include <netinet6/in6_var.h>
104 #include <netinet6/ip6_var.h>
105 #include <netinet6/nd6.h>
106 #include <netinet/tcp.h>
107 #include <netinet/tcp_fsm.h>
108 #include <netinet/tcp_log_buf.h>
109 #include <netinet/tcp_seq.h>
110 #include <netinet/tcp_timer.h>
111 #include <netinet/tcp_var.h>
112 #include <netinet6/tcp6_var.h>
113 #include <netinet/tcpip.h>
114 #include <netinet/cc/cc.h>
#include <netinet/tcp_fastopen.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#include <netinet/tcp_syncache.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
126 #include <netinet/udp.h>
128 #include <netipsec/ipsec_support.h>
130 #include <machine/in_cksum.h>
132 #include <security/mac/mac_framework.h>
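
/* Number of duplicate ACKs required to trigger fast retransmit. */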
134 const int tcprexmtthresh = 3;
136 VNET_DEFINE(int, tcp_log_in_vain) = 0;
137 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_VNET | CTLFLAG_RW,
138 &VNET_NAME(tcp_log_in_vain), 0,
139 "Log all incoming TCP segments to closed ports");
141 VNET_DEFINE(int, blackhole) = 0;
142 #define V_blackhole VNET(blackhole)
143 SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
144 &VNET_NAME(blackhole), 0,
145 "Do not send RST on segments to closed ports");
147 VNET_DEFINE(int, tcp_delack_enabled) = 1;
148 SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_VNET | CTLFLAG_RW,
149 &VNET_NAME(tcp_delack_enabled), 0,
150 "Delay ACK to try and piggyback it onto a data packet");
152 VNET_DEFINE(int, drop_synfin) = 0;
153 SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_VNET | CTLFLAG_RW,
154 &VNET_NAME(drop_synfin), 0,
155 "Drop TCP packets with SYN+FIN set");
157 VNET_DEFINE(int, tcp_do_prr_conservative) = 0;
158 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_prr_conservative, CTLFLAG_VNET | CTLFLAG_RW,
159 &VNET_NAME(tcp_do_prr_conservative), 0,
160 "Do conservative Proportional Rate Reduction");
162 VNET_DEFINE(int, tcp_do_prr) = 1;
163 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_prr, CTLFLAG_VNET | CTLFLAG_RW,
164 &VNET_NAME(tcp_do_prr), 1,
165 "Enable Proportional Rate Reduction per RFC 6937");
167 VNET_DEFINE(int, tcp_do_lrd) = 0;
168 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_lrd, CTLFLAG_VNET | CTLFLAG_RW,
169 &VNET_NAME(tcp_do_lrd), 1,
170 "Perform Lost Retransmission Detection");
172 VNET_DEFINE(int, tcp_do_newcwv) = 0;
173 SYSCTL_INT(_net_inet_tcp, OID_AUTO, newcwv, CTLFLAG_VNET | CTLFLAG_RW,
174 &VNET_NAME(tcp_do_newcwv), 0,
175 "Enable New Congestion Window Validation per RFC7661");
177 VNET_DEFINE(int, tcp_do_rfc3042) = 1;
178 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_VNET | CTLFLAG_RW,
179 &VNET_NAME(tcp_do_rfc3042), 0,
180 "Enable RFC 3042 (Limited Transmit)");
182 VNET_DEFINE(int, tcp_do_rfc3390) = 1;
183 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_VNET | CTLFLAG_RW,
184 &VNET_NAME(tcp_do_rfc3390), 0,
185 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");
187 VNET_DEFINE(int, tcp_initcwnd_segments) = 10;
188 SYSCTL_INT(_net_inet_tcp, OID_AUTO, initcwnd_segments,
189 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_initcwnd_segments), 0,
190 "Slow-start flight size (initial congestion window) in number of segments");
192 VNET_DEFINE(int, tcp_do_rfc3465) = 1;
193 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_VNET | CTLFLAG_RW,
194 &VNET_NAME(tcp_do_rfc3465), 0,
195 "Enable RFC 3465 (Appropriate Byte Counting)");
197 VNET_DEFINE(int, tcp_abc_l_var) = 2;
198 SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_VNET | CTLFLAG_RW,
199 &VNET_NAME(tcp_abc_l_var), 2,
200 "Cap the max cwnd increment during slow-start to this number of segments");
static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP ECN");

VNET_DEFINE(int, tcp_do_ecn) = 2;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_ecn), 0,
    "TCP ECN support");

VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
212 SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_VNET | CTLFLAG_RW,
213 &VNET_NAME(tcp_ecn_maxretries), 0,
214 "Max retries before giving up on ECN");
216 VNET_DEFINE(int, tcp_insecure_syn) = 0;
217 SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_syn, CTLFLAG_VNET | CTLFLAG_RW,
218 &VNET_NAME(tcp_insecure_syn), 0,
219 "Follow RFC793 instead of RFC5961 criteria for accepting SYN packets");
221 VNET_DEFINE(int, tcp_insecure_rst) = 0;
222 SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_VNET | CTLFLAG_RW,
223 &VNET_NAME(tcp_insecure_rst), 0,
224 "Follow RFC793 instead of RFC5961 criteria for accepting RST packets");
226 VNET_DEFINE(int, tcp_recvspace) = 1024*64;
227 #define V_tcp_recvspace VNET(tcp_recvspace)
228 SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_VNET | CTLFLAG_RW,
229 &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");
231 VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
232 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
233 &VNET_NAME(tcp_do_autorcvbuf), 0,
234 "Enable automatic receive buffer sizing");
236 VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
237 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
238 &VNET_NAME(tcp_autorcvbuf_max), 0,
239 "Max size of automatic receive buffer");
241 VNET_DEFINE(struct inpcbhead, tcb);
242 #define tcb6 tcb /* for KAME src sync over BSD*'s */
243 VNET_DEFINE(struct inpcbinfo, tcbinfo);
/*
 * TCP statistics are stored in an array of counter(9)s whose size matches
 * that of struct tcpstat.  The TCP running connection count is a regular
 * array.
 */
249 VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
250 SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
251 tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
252 VNET_DEFINE(counter_u64_t, tcps_states[TCP_NSTATES]);
253 SYSCTL_COUNTER_U64_ARRAY(_net_inet_tcp, TCPCTL_STATES, states, CTLFLAG_RD |
254 CTLFLAG_VNET, &VNET_NAME(tcps_states)[0], TCP_NSTATES,
255 "TCP connection counts by TCP state");
static void
tcp_vnet_init(const void *unused)
{

	COUNTER_ARRAY_ALLOC(V_tcps_states, TCP_NSTATES, M_WAITOK);
	VNET_PCPUSTAT_ALLOC(tcpstat, M_WAITOK);
}
VNET_SYSINIT(tcp_vnet_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    tcp_vnet_init, NULL);
#ifdef VIMAGE
static void
tcp_vnet_uninit(const void *unused)
{

	COUNTER_ARRAY_FREE(V_tcps_states, TCP_NSTATES);
	VNET_PCPUSTAT_FREE(tcpstat);
}
VNET_SYSUNINIT(tcp_vnet_uninit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    tcp_vnet_uninit, NULL);
#endif /* VIMAGE */
/*
 * Kernel module interface for updating tcpstat.  The first argument is an index
 * into tcpstat treated as an array.
 */
void
kmod_tcpstat_add(int statnum, int val)
{

	counter_u64_add(VNET(tcpstat)[statnum], val);
}
#ifdef TCP_HHOOK
/*
 * Wrapper for the TCP established input helper hook.
 */
void
hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
		    tp->osd);
	}
}
#endif
/*
 * CC wrapper hook functions
 */
void
cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t nsegs,
    uint16_t type)
{
#ifdef STATS
	int32_t gput;
#endif

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tp->ccv->nsegs = nsegs;
	tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
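	/*
	 * Tell the congestion control algorithm whether the connection is
	 * currently limited by cwnd (rather than by the peer's window), so
	 * that cwnd is only grown while it is actually the bottleneck.
	 */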
	if ((!V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd)) ||
	    (V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd) &&
	     (tp->snd_cwnd < (tcp_compute_pipe(tp) * 2))))
		tp->ccv->flags |= CCF_CWND_LIMITED;
	else
		tp->ccv->flags &= ~CCF_CWND_LIMITED;

	if (type == CC_ACK) {
#ifdef STATS
		stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
		    ((int32_t)tp->snd_cwnd) - tp->snd_wnd);
		if (!IN_RECOVERY(tp->t_flags))
			stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_ACKLEN,
			    tp->ccv->bytes_this_ack / (tcp_maxseg(tp) * nsegs));
		if ((tp->t_flags & TF_GPUTINPROG) &&
		    SEQ_GEQ(th->th_ack, tp->gput_ack)) {
			/*
			 * Compute goodput in bits per millisecond.
			 */
			gput = (((int64_t)(th->th_ack - tp->gput_seq)) << 3) /
			    max(1, tcp_ts_getticks() - tp->gput_ts);
			stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
			    gput);
			/*
			 * XXXLAS: This is a temporary hack, and should be
			 * chained off VOI_TCP_GPUT when stats(9) grows an API
			 * to deal with chained VOIs.
			 */
			if (tp->t_stats_gput_prev > 0)
				stats_voi_update_abs_s32(tp->t_stats,
				    VOI_TCP_GPUT_ND,
				    ((gput - tp->t_stats_gput_prev) * 100) /
				    tp->t_stats_gput_prev);
			tp->t_flags &= ~TF_GPUTINPROG;
			tp->t_stats_gput_prev = gput;
		}
#endif /* STATS */
		if (tp->snd_cwnd > tp->snd_ssthresh) {
			tp->t_bytes_acked += tp->ccv->bytes_this_ack;
			if (tp->t_bytes_acked >= tp->snd_cwnd) {
				tp->t_bytes_acked -= tp->snd_cwnd;
				tp->ccv->flags |= CCF_ABC_SENTAWND;
			}
		} else {
			tp->ccv->flags &= ~CCF_ABC_SENTAWND;
			tp->t_bytes_acked = 0;
		}
	}

	if (CC_ALGO(tp)->ack_received != NULL) {
		/* XXXLAS: Find a way to live without this */
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->ack_received(tp->ccv, type);
	}
#ifdef STATS
	stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, tp->snd_cwnd);
#endif
}
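
/*
 * Prime a freshly established connection from the hostcache: seed the RTT
 * estimate, slow start threshold and initial congestion window before the
 * congestion control module's own conn_init hook runs.
 */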
void
cc_conn_init(struct tcpcb *tp)
{
	struct hc_metrics_lite metrics;
	struct inpcb *inp = tp->t_inpcb;
	u_int maxseg;
	int rtt;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tcp_hc_get(&inp->inp_inc, &metrics);
	maxseg = tcp_maxseg(tp);
	if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
		tp->t_srtt = rtt;
		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
		TCPSTAT_INC(tcps_usedrtt);
		if (metrics.rmx_rttvar) {
			tp->t_rttvar = metrics.rmx_rttvar;
			TCPSTAT_INC(tcps_usedrttvar);
		} else {
			/* default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
	if (metrics.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * maxseg, metrics.rmx_ssthresh);
		TCPSTAT_INC(tcps_usedssthresh);
	}
	/*
	 * Set the initial slow-start flight size.
	 *
	 * If a SYN or SYN/ACK was lost and retransmitted, we have to
	 * reduce the initial CWND to one segment as congestion is likely
	 * requiring us to be cautious.
	 */
	if (tp->snd_cwnd == 1)
		tp->snd_cwnd = maxseg;		/* SYN(-ACK) lost */
	else
		tp->snd_cwnd = tcp_compute_initwnd(maxseg);
	if (CC_ALGO(tp)->conn_init != NULL)
		CC_ALGO(tp)->conn_init(tp->ccv);
}
void
cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

#ifdef STATS
	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
#endif

	switch(type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(tp->t_flags)) {
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags2 & TF2_ECN_PERMIT)
				tp->t_flags2 |= TF2_ECN_SND_CWR;
		}
		break;
	case CC_ECN:
		if (!IN_CONGRECOVERY(tp->t_flags) ||
		    /*
		     * Allow ECN reaction on ACK to CWR, if
		     * that data segment was also CE marked.
		     */
		    SEQ_GEQ(th->th_ack, tp->snd_recover)) {
			EXIT_CONGRECOVERY(tp->t_flags);
			TCPSTAT_INC(tcps_ecn_rcwnd);
			tp->snd_recover = tp->snd_max + 1;
			if (tp->t_flags2 & TF2_ECN_PERMIT)
				tp->t_flags2 |= TF2_ECN_SND_CWR;
		}
		break;
	case CC_RTO:
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		EXIT_RECOVERY(tp->t_flags);
		if (tp->t_flags2 & TF2_ECN_PERMIT)
			tp->t_flags2 |= TF2_ECN_SND_CWR;
		break;
	case CC_RTO_ERR:
		TCPSTAT_INC(tcps_sndrexmitbad);
		/* RTO was unnecessary, so reset everything. */
		tp->snd_cwnd = tp->snd_cwnd_prev;
		tp->snd_ssthresh = tp->snd_ssthresh_prev;
		tp->snd_recover = tp->snd_recover_prev;
		if (tp->t_flags & TF_WASFRECOVERY)
			ENTER_FASTRECOVERY(tp->t_flags);
		if (tp->t_flags & TF_WASCRECOVERY)
			ENTER_CONGRECOVERY(tp->t_flags);
		tp->snd_nxt = tp->snd_max;
		tp->t_flags &= ~TF_PREVVALID;
		tp->t_badrxtwin = 0;
		break;
	}

	if (CC_ALGO(tp)->cong_signal != NULL) {
		if (th != NULL)
			tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->cong_signal(tp->ccv, type);
	}
}
void
cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	/* XXXLAS: KASSERT that we're in recovery? */

	if (CC_ALGO(tp)->post_recovery != NULL) {
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->post_recovery(tp->ccv);
	}
	/* XXXLAS: EXIT_RECOVERY ? */
	tp->t_bytes_acked = 0;
	tp->sackhint.delivered_data = 0;
	tp->sackhint.prr_out = 0;
}
/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 * following conditions are met:
 *	- There is no delayed ack timer in progress.
 *	- Our last ack wasn't a 0-sized window. We never want to delay
 *	  the ack that opens up a 0-sized window.
 *	- LRO wasn't used for this segment. We make sure by checking that the
 *	  segment size is not larger than the MSS.
 */
527 #define DELAY_ACK(tp, tlen) \
528 ((!tcp_timer_active(tp, TT_DELACK) && \
529 (tp->t_flags & TF_RXWIN0SENT) == 0) && \
530 (tlen <= tp->t_maxseg) && \
531 (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
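
/*
 * Inform the congestion control module about the ECN-related bits of an
 * incoming segment: the IP-header ECN codepoint (CE/ECT) and the TCP CWR
 * flag.  The module may request an immediate ACK via CCF_ACKNOW.
 */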
void
cc_ecnpkt_handler_flags(struct tcpcb *tp, uint16_t flags, uint8_t iptos)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	if (CC_ALGO(tp)->ecnpkt_handler != NULL) {
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->ccv->flags |= CCF_IPHDR_CE;
			break;
		case IPTOS_ECN_ECT0:
			/* FALLTHROUGH */
		case IPTOS_ECN_ECT1:
			/* FALLTHROUGH */
		case IPTOS_ECN_NOTECT:
			tp->ccv->flags &= ~CCF_IPHDR_CE;
			break;
		}

		if (flags & TH_CWR)
			tp->ccv->flags |= CCF_TCPHDR_CWR;
		else
			tp->ccv->flags &= ~CCF_TCPHDR_CWR;

		CC_ALGO(tp)->ecnpkt_handler(tp->ccv);

		if (tp->ccv->flags & CCF_ACKNOW) {
			tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
			tp->t_flags |= TF_ACKNOW;
		}
	}
}

void
cc_ecnpkt_handler(struct tcpcb *tp, struct tcphdr *th, uint8_t iptos)
{
	cc_ecnpkt_handler_flags(tp, th->th_flags, iptos);
}
/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input_with_port(struct mbuf **mp, int *offp, int proto, uint16_t port)
{
	struct mbuf *m;
	struct in6_ifaddr *ia6;
	struct ip6_hdr *ip6;

	m = *mp;
	if (m->m_len < *offp + sizeof(struct tcphdr)) {
		m = m_pullup(m, *offp + sizeof(struct tcphdr));
		if (m == NULL) {
			*mp = m;
			TCPSTAT_INC(tcps_rcvshort);
			return (IPPROTO_DONE);
		}
	}

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ip6 = mtod(m, struct ip6_hdr *);
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		*mp = NULL;
		return (IPPROTO_DONE);
	}

	*mp = m;
	return (tcp_input_with_port(mp, offp, proto, port));
}

int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{

	return(tcp6_input_with_port(mp, offp, proto, 0));
}
#endif /* INET6 */

int
tcp_input_with_port(struct mbuf **mp, int *offp, int proto, uint16_t port)
{
	struct mbuf *m = *mp;
	struct tcphdr *th = NULL;
	struct ip *ip = NULL;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int off0;
	int optlen = 0;
#ifdef INET
	int len;
#endif
	int tlen = 0, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
	uint8_t iptos;
	struct m_tag *fwd_tag = NULL;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
#endif /* INET6 */
	struct tcpopt to;		/* options in this segment */
	char *s = NULL;			/* address and port logging */
#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	off0 = *offp;
	*mp = NULL;
	to.to_flags = 0;
	TCPSTAT_INC(tcps_rcvtotal);
679 ip6 = mtod(m, struct ip6_hdr *);
680 th = (struct tcphdr *)((caddr_t)ip6 + off0);
681 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
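		/*
		 * If the NIC validated the checksum (CSUM_DATA_VALID_IPV6),
		 * use or finish its result; otherwise fall back to a full
		 * software in6_cksum() over the segment.
		 */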
684 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
685 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
686 th->th_sum = m->m_pkthdr.csum_data;
688 th->th_sum = in6_cksum_pseudo(ip6, tlen,
689 IPPROTO_TCP, m->m_pkthdr.csum_data);
690 th->th_sum ^= 0xffff;
692 th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
694 TCPSTAT_INC(tcps_rcvbadsum);
		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbounded/unconnected pcb,
		 * unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with unspecified IPv6 destination is
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
		iptos = IPV6_TRAFFIC_CLASS(ip6);
713 #if defined(INET) && defined(INET6)
719 * Get IP and TCP header together in first mbuf.
720 * Note: IP leaves IP header in first mbuf.
722 if (off0 > sizeof (struct ip)) {
724 off0 = sizeof(struct ip);
726 if (m->m_len < sizeof (struct tcpiphdr)) {
727 if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
729 TCPSTAT_INC(tcps_rcvshort);
730 return (IPPROTO_DONE);
733 ip = mtod(m, struct ip *);
734 th = (struct tcphdr *)((caddr_t)ip + off0);
735 tlen = ntohs(ip->ip_len) - off0;
740 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
741 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
742 th->th_sum = m->m_pkthdr.csum_data;
744 th->th_sum = in_pseudo(ip->ip_src.s_addr,
746 htonl(m->m_pkthdr.csum_data + tlen +
748 th->th_sum ^= 0xffff;
750 struct ipovly *ipov = (struct ipovly *)ip;
753 * Checksum extended TCP header and data.
757 bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
758 ipov->ih_len = htons(tlen);
759 th->th_sum = in_cksum(m, len);
760 /* Reset length for SDT probes. */
761 ip->ip_len = htons(len);
764 /* Re-initialization for later version check */
766 ip->ip_v = IPVERSION;
767 ip->ip_hl = off0 >> 2;
770 if (th->th_sum && (port == 0)) {
771 TCPSTAT_INC(tcps_rcvbadsum);
778 * Check that TCP offset makes sense,
779 * pull out TCP options and adjust length. XXX
781 off = th->th_off << 2;
782 if (off < sizeof (struct tcphdr) || off > tlen) {
783 TCPSTAT_INC(tcps_rcvbadoff);
786 tlen -= off; /* tlen is used instead of ti->ti_len */
787 if (off > sizeof (struct tcphdr)) {
790 if (m->m_len < off0 + off) {
791 m = m_pullup(m, off0 + off);
793 TCPSTAT_INC(tcps_rcvshort);
794 return (IPPROTO_DONE);
797 ip6 = mtod(m, struct ip6_hdr *);
798 th = (struct tcphdr *)((caddr_t)ip6 + off0);
801 #if defined(INET) && defined(INET6)
806 if (m->m_len < sizeof(struct ip) + off) {
807 if ((m = m_pullup(m, sizeof (struct ip) + off))
809 TCPSTAT_INC(tcps_rcvshort);
810 return (IPPROTO_DONE);
812 ip = mtod(m, struct ip *);
813 th = (struct tcphdr *)((caddr_t)ip + off0);
817 optlen = off - sizeof (struct tcphdr);
818 optp = (u_char *)(th + 1);
820 thflags = th->th_flags;
823 * Convert TCP protocol specific fields to host format.
825 tcp_fields_to_host(th);
828 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
830 drop_hdrlen = off0 + off;
833 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
837 (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
839 || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
842 #if defined(INET) && !defined(INET6)
843 (m->m_flags & M_IP_NEXTHOP)
846 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
849 * For initial SYN packets we don't need write lock on matching
850 * PCB, be it a listening one or a synchronized one. The packet
851 * shall not modify its state.
853 lookupflag = (thflags & (TH_ACK|TH_SYN)) == TH_SYN ?
854 INPLOOKUP_RLOCKPCB : INPLOOKUP_WLOCKPCB;
857 if (isipv6 && fwd_tag != NULL) {
858 struct sockaddr_in6 *next_hop6;
860 next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
862 * Transparently forwarded. Pretend to be the destination.
863 * Already got one like this?
865 inp = in6_pcblookup_mbuf(&V_tcbinfo,
866 &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
867 lookupflag, m->m_pkthdr.rcvif, m);
870 * It's new. Try to find the ambushing socket.
871 * Because we've rewritten the destination address,
872 * any hardware-generated hash is ignored.
874 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
875 th->th_sport, &next_hop6->sin6_addr,
876 next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
877 th->th_dport, INPLOOKUP_WILDCARD | lookupflag,
881 inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
882 th->th_sport, &ip6->ip6_dst, th->th_dport,
883 INPLOOKUP_WILDCARD | lookupflag, m->m_pkthdr.rcvif, m);
886 #if defined(INET6) && defined(INET)
890 if (fwd_tag != NULL) {
891 struct sockaddr_in *next_hop;
893 next_hop = (struct sockaddr_in *)(fwd_tag+1);
895 * Transparently forwarded. Pretend to be the destination.
896 * already got one like this?
898 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
899 ip->ip_dst, th->th_dport, lookupflag, m->m_pkthdr.rcvif, m);
902 * It's new. Try to find the ambushing socket.
903 * Because we've rewritten the destination address,
904 * any hardware-generated hash is ignored.
906 inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
907 th->th_sport, next_hop->sin_addr,
908 next_hop->sin_port ? ntohs(next_hop->sin_port) :
909 th->th_dport, INPLOOKUP_WILDCARD | lookupflag,
913 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
914 th->th_sport, ip->ip_dst, th->th_dport,
915 INPLOOKUP_WILDCARD | lookupflag, m->m_pkthdr.rcvif, m);
919 * If the INPCB does not exist then all data in the incoming
920 * segment is discarded and an appropriate RST is sent back.
921 * XXX MRT Send RST using which routing table?
925 * Log communication attempts to ports that are not
928 if ((V_tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
929 V_tcp_log_in_vain == 2) {
930 if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
931 log(LOG_INFO, "%s; %s: Connection attempt "
932 "to closed port\n", s, __func__);
935 * When blackholing do not respond with a RST but
936 * completely ignore the segment and drop it.
938 if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
942 rstreason = BANDLIM_RST_CLOSEDPORT;
945 INP_LOCK_ASSERT(inp);
947 * While waiting for inp lock during the lookup, another thread
948 * can have dropped the inpcb, in which case we need to loop back
949 * and try to find a new inpcb to deliver to.
951 if (inp->inp_flags & INP_DROPPED) {
956 if ((inp->inp_flowtype == M_HASHTYPE_NONE) &&
957 (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) &&
958 ((inp->inp_socket == NULL) || !SOLISTENING(inp->inp_socket))) {
959 inp->inp_flowid = m->m_pkthdr.flowid;
960 inp->inp_flowtype = M_HASHTYPE_GET(m);
962 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
964 if (isipv6 && IPSEC_ENABLED(ipv6) &&
965 IPSEC_CHECK_POLICY(ipv6, m, inp) != 0) {
973 if (IPSEC_ENABLED(ipv4) &&
974 IPSEC_CHECK_POLICY(ipv4, m, inp) != 0) {
981 * Check the minimum TTL for socket.
983 if (inp->inp_ip_minttl != 0) {
986 if (inp->inp_ip_minttl > ip6->ip6_hlim)
990 if (inp->inp_ip_minttl > ip->ip_ttl)
995 * A previous connection in TIMEWAIT state is supposed to catch stray
996 * or duplicate segments arriving late. If this segment was a
997 * legitimate new connection attempt, the old INPCB gets removed and
998 * we can try again to find a listening socket.
1000 if (inp->inp_flags & INP_TIMEWAIT) {
1001 tcp_dooptions(&to, optp, optlen,
1002 (thflags & TH_SYN) ? TO_SYN : 0);
1004 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
1006 if (tcp_twcheck(inp, &to, th, m, tlen))
1008 return (IPPROTO_DONE);
1011 * The TCPCB may no longer exist if the connection is winding
1012 * down or it is in the CLOSED state. Either way we drop the
1013 * segment and send an appropriate response.
1015 tp = intotcpcb(inp);
1016 if (tp == NULL || tp->t_state == TCPS_CLOSED) {
1017 rstreason = BANDLIM_RST_CLOSEDPORT;
1021 if ((tp->t_port != port) && (tp->t_state > TCPS_LISTEN)) {
1022 rstreason = BANDLIM_RST_CLOSEDPORT;
1027 if (tp->t_flags & TF_TOE) {
1028 tcp_offload_input(tp, m);
1029 m = NULL; /* consumed by the TOE driver */
1035 if (mac_inpcb_check_deliver(inp, m))
1038 so = inp->inp_socket;
1039 KASSERT(so != NULL, ("%s: so == NULL", __func__));
1041 if (so->so_options & SO_DEBUG) {
1042 ostate = tp->t_state;
1045 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
1048 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
1051 #endif /* TCPDEBUG */
1053 * When the socket is accepting connections (the INPCB is in LISTEN
1054 * state) we look into the SYN cache if this is a new connection
1055 * attempt or the completion of a previous one.
1057 KASSERT(tp->t_state == TCPS_LISTEN || !SOLISTENING(so),
1058 ("%s: so accepting but tp %p not listening", __func__, tp));
1059 if (tp->t_state == TCPS_LISTEN && SOLISTENING(so)) {
1060 struct in_conninfo inc;
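
		/*
		 * Describe this segment's addresses and ports in an
		 * in_conninfo for the syncache calls below.
		 */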
1062 bzero(&inc, sizeof(inc));
1065 inc.inc_flags |= INC_ISIPV6;
1066 if (inp->inp_inc.inc_flags & INC_IPV6MINMTU)
1067 inc.inc_flags |= INC_IPV6MINMTU;
1068 inc.inc6_faddr = ip6->ip6_src;
1069 inc.inc6_laddr = ip6->ip6_dst;
1073 inc.inc_faddr = ip->ip_src;
1074 inc.inc_laddr = ip->ip_dst;
1076 inc.inc_fport = th->th_sport;
1077 inc.inc_lport = th->th_dport;
1078 inc.inc_fibnum = so->so_fibnum;
1081 * Check for an existing connection attempt in syncache if
1082 * the flag is only ACK. A successful lookup creates a new
1083 * socket appended to the listen queue in SYN_RECEIVED state.
1085 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
1087 * Parse the TCP options here because
1088 * syncookies need access to the reflected
1091 tcp_dooptions(&to, optp, optlen, 0);
1093 * NB: syncache_expand() doesn't unlock
1094 * inp and tcpinfo locks.
1096 rstreason = syncache_expand(&inc, &to, th, &so, m, port);
1097 if (rstreason < 0) {
1099 * A failing TCP MD5 signature comparison
1100 * must result in the segment being dropped
1101 * and must not produce any response back
1105 } else if (rstreason == 0) {
1107 * No syncache entry or ACK was not
1108 * for our SYN/ACK. Send a RST.
1109 * NB: syncache did its own logging
1110 * of the failure cause.
1112 rstreason = BANDLIM_RST_OPENPORT;
1118 * We completed the 3-way handshake
1119 * but could not allocate a socket
1120 * either due to memory shortage,
1121 * listen queue length limits or
1122 * global socket limits. Send RST
1123 * or wait and have the remote end
1124 * retransmit the ACK for another
1127 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1128 log(LOG_DEBUG, "%s; %s: Listen socket: "
1129 "Socket allocation failed due to "
1130 "limits or memory shortage, %s\n",
1132 V_tcp_sc_rst_sock_fail ?
1133 "sending RST" : "try again");
1134 if (V_tcp_sc_rst_sock_fail) {
1135 rstreason = BANDLIM_UNLIMITED;
1141 * Socket is created in state SYN_RECEIVED.
1142 * Unlock the listen socket, lock the newly
1143 * created socket and update the tp variable.
1144 * If we came here via jump to tfo_socket_result,
1145 * then listening socket is read-locked.
1147 INP_UNLOCK(inp); /* listen socket */
1148 inp = sotoinpcb(so);
1150 * New connection inpcb is already locked by
1151 * syncache_expand().
1153 INP_WLOCK_ASSERT(inp);
1154 tp = intotcpcb(inp);
1155 KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
1156 ("%s: ", __func__));
1158 * Process the segment and the data it
1159 * contains. tcp_do_segment() consumes
1160 * the mbuf chain and unlocks the inpcb.
1162 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1163 tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
1165 return (IPPROTO_DONE);
1168 * Segment flag validation for new connection attempts:
1170 * Our (SYN|ACK) response was rejected.
1171 * Check with syncache and remove entry to prevent
1174 * NB: syncache_chkrst does its own logging of failure
1177 if (thflags & TH_RST) {
1178 syncache_chkrst(&inc, th, m, port);
1182 * We can't do anything without SYN.
1184 if ((thflags & TH_SYN) == 0) {
1185 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1186 log(LOG_DEBUG, "%s; %s: Listen socket: "
1187 "SYN is missing, segment ignored\n",
1189 TCPSTAT_INC(tcps_badsyn);
1193 * (SYN|ACK) is bogus on a listen socket.
1195 if (thflags & TH_ACK) {
1196 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1197 log(LOG_DEBUG, "%s; %s: Listen socket: "
1198 "SYN|ACK invalid, segment rejected\n",
1200 syncache_badack(&inc, port); /* XXX: Not needed! */
1201 TCPSTAT_INC(tcps_badsyn);
1202 rstreason = BANDLIM_RST_OPENPORT;
1206 * If the drop_synfin option is enabled, drop all
1207 * segments with both the SYN and FIN bits set.
1208 * This prevents e.g. nmap from identifying the
1210 * XXX: Poor reasoning. nmap has other methods
1211 * and is constantly refining its stack detection
1213 * XXX: This is a violation of the TCP specification
1214 * and was used by RFC1644.
1216 if ((thflags & TH_FIN) && V_drop_synfin) {
1217 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1218 log(LOG_DEBUG, "%s; %s: Listen socket: "
1219 "SYN|FIN segment ignored (based on "
1220 "sysctl setting)\n", s, __func__);
1221 TCPSTAT_INC(tcps_badsyn);
1225 * Segment's flags are (SYN) or (SYN|FIN).
1227 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
1228 * as they do not affect the state of the TCP FSM.
1229 * The data pointed to by TH_URG and th_urp is ignored.
1231 KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
1232 ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
1233 KASSERT(thflags & (TH_SYN),
1234 ("%s: Listen socket: TH_SYN not set", __func__));
1235 INP_RLOCK_ASSERT(inp);
1238 * If deprecated address is forbidden,
1239 * we do not accept SYN to deprecated interface
1240 * address to prevent any new inbound connection from
1241 * getting established.
1242 * When we do not accept SYN, we send a TCP RST,
1243 * with deprecated source address (instead of dropping
1244 * it). We compromise it as it is much better for peer
1245 * to send a RST, and RST will be the final packet
1248 * If we do not forbid deprecated addresses, we accept
1249 * the SYN packet. RFC2462 does not suggest dropping
1251 * If we decipher RFC2462 5.5.4, it says like this:
1252 * 1. use of deprecated addr with existing
1253 * communication is okay - "SHOULD continue to be
1255 * 2. use of it with new communication:
1256 * (2a) "SHOULD NOT be used if alternate address
1257 * with sufficient scope is available"
1258 * (2b) nothing mentioned otherwise.
1259 * Here we fall into (2b) case as we have no choice in
1260 * our source address selection - we must obey the peer.
1262 * The wording in RFC2462 is confusing, and there are
1263 * multiple description text for deprecated address
1264 * handling - worse, they are not exactly the same.
1265 * I believe 5.5.4 is the best one, so we follow 5.5.4.
1267 if (isipv6 && !V_ip6_use_deprecated) {
1268 struct in6_ifaddr *ia6;
1270 ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
1272 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
1273 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1274 log(LOG_DEBUG, "%s; %s: Listen socket: "
1275 "Connection attempt to deprecated "
1276 "IPv6 address rejected\n",
1278 rstreason = BANDLIM_RST_OPENPORT;
1284 * Basic sanity checks on incoming SYN requests:
1285 * Don't respond if the destination is a link layer
1286 * broadcast according to RFC1122 4.2.3.10, p. 104.
1287 * If it is from this socket it must be forged.
1288 * Don't respond if the source or destination is a
1289 * global or subnet broad- or multicast address.
1290 * Note that it is quite possible to receive unicast
1291 * link-layer packets with a broadcast IP address. Use
1292 * in_broadcast() to find them.
1294 if (m->m_flags & (M_BCAST|M_MCAST)) {
1295 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1296 log(LOG_DEBUG, "%s; %s: Listen socket: "
1297 "Connection attempt from broad- or multicast "
1298 "link layer address ignored\n", s, __func__);
1303 if (th->th_dport == th->th_sport &&
1304 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
1305 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1306 log(LOG_DEBUG, "%s; %s: Listen socket: "
1307 "Connection attempt to/from self "
1308 "ignored\n", s, __func__);
1311 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
1312 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
1313 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1314 log(LOG_DEBUG, "%s; %s: Listen socket: "
1315 "Connection attempt from/to multicast "
1316 "address ignored\n", s, __func__);
1321 #if defined(INET) && defined(INET6)
1326 if (th->th_dport == th->th_sport &&
1327 ip->ip_dst.s_addr == ip->ip_src.s_addr) {
1328 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1329 log(LOG_DEBUG, "%s; %s: Listen socket: "
1330 "Connection attempt from/to self "
1331 "ignored\n", s, __func__);
1334 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
1335 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
1336 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
1337 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
1338 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1339 log(LOG_DEBUG, "%s; %s: Listen socket: "
1340 "Connection attempt from/to broad- "
1341 "or multicast address ignored\n",
1348 * SYN appears to be valid. Create compressed TCP state
1352 if (so->so_options & SO_DEBUG)
1353 tcp_trace(TA_INPUT, ostate, tp,
1354 (void *)tcp_saveipgen, &tcp_savetcp, 0);
1356 TCP_PROBE3(debug__input, tp, th, m);
1357 tcp_dooptions(&to, optp, optlen, TO_SYN);
1358 if ((so = syncache_add(&inc, &to, th, inp, so, m, NULL, NULL,
1359 iptos, port)) != NULL)
1360 goto tfo_socket_result;
1363 * Entry added to syncache and mbuf consumed.
1364 * Only the listen socket is unlocked by syncache_add().
1366 return (IPPROTO_DONE);
1367 } else if (tp->t_state == TCPS_LISTEN) {
1369 * When a listen socket is torn down the SO_ACCEPTCONN
1370 * flag is removed first while connections are drained
1371 * from the accept queue in a unlock/lock cycle of the
1372 * ACCEPT_LOCK, opening a race condition allowing a SYN
1373 * attempt go through unhandled.
1377 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1378 if (tp->t_flags & TF_SIGNATURE) {
1379 tcp_dooptions(&to, optp, optlen, thflags);
1380 if ((to.to_flags & TOF_SIGNATURE) == 0) {
1381 TCPSTAT_INC(tcps_sig_err_nosigopt);
1384 if (!TCPMD5_ENABLED() ||
1385 TCPMD5_INPUT(m, th, to.to_signature) != 0)
1389 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1392 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
1393 * state. tcp_do_segment() always consumes the mbuf chain, unlocks
1394 * the inpcb, and unlocks pcbinfo.
1396 * XXXGL: in case of a pure SYN arriving on existing connection
1397 * TCP stacks won't need to modify the PCB, they would either drop
1398 * the segment silently, or send a challenge ACK. However, we try
1399 * to upgrade the lock, because calling convention for stacks is
1400 * write-lock on PCB. If upgrade fails, drop the SYN.
1402 if (lookupflag == INPLOOKUP_RLOCKPCB && INP_TRY_UPGRADE(inp) == 0)
1405 tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos);
1406 return (IPPROTO_DONE);
1409 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1412 tcp_dropwithreset(m, th, tp, tlen, rstreason);
1415 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
1416 m = NULL; /* mbuf chain got consumed. */
1421 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1431 return (IPPROTO_DONE);
1435 * Automatic sizing of receive socket buffer. Often the send
1436 * buffer size is not optimally adjusted to the actual network
1437 * conditions at hand (delay bandwidth product). Setting the
1438 * buffer size too small limits throughput on links with high
1439 * bandwidth and high delay (eg. trans-continental/oceanic links).
1441 * On the receive side the socket buffer memory is only rarely
1442 * used to any significant extent. This allows us to be much
1443 * more aggressive in scaling the receive socket buffer. For
1444 * the case that the buffer space is actually used to a large
1445 * extent and we run out of kernel memory we can simply drop
1446 * the new segments; TCP on the sender will just retransmit it
1447 * later. Setting the buffer size too big may only consume too
1448 * much kernel memory if the application doesn't read() from
1449 * the socket or packet loss or reordering makes use of the
1452 * The criteria to step up the receive buffer one notch are:
1453 * 1. Application has not set receive buffer size with
1454 * SO_RCVBUF. Setting SO_RCVBUF clears SB_AUTOSIZE.
1455 * 2. the number of bytes received during 1/2 of an sRTT
1456 * is at least 3/8 of the current socket buffer size.
1457 * 3. receive buffer size has not hit maximal automatic size;
 * If all of the criteria are met we increase the socket buffer
 * by a 1/2 (bounded by the max). This allows us to keep ahead
 * of slow-start but also makes it so our peer never gets limited
 * by our rwnd which we then open up causing a burst.
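 * For example, with the default 64KB initial buffer and a 2MB cap,
 * receiving more than 24KB (3/8 of 64KB) within half of one sRTT grows
 * the buffer to 96KB.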
1464 * This algorithm does two steps per RTT at most and only if
1465 * we receive a bulk stream w/o packet losses or reorderings.
1466 * Shrinking the buffer during idle times is not necessary as
1467 * it doesn't consume any memory when idle.
1469 * TODO: Only step up if the application is actually serving
 * the buffer to better manage the socket buffer resources.
 */
int
tcp_autorcvbuf(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int tlen)
{
	int newsize = 0;

	if (V_tcp_do_autorcvbuf && (so->so_rcv.sb_flags & SB_AUTOSIZE) &&
	    tp->t_srtt != 0 && tp->rfbuf_ts != 0 &&
	    TCP_TS_TO_TICKS(tcp_ts_getticks() - tp->rfbuf_ts) >
	    ((tp->t_srtt >> TCP_RTT_SHIFT)/2)) {
		if (tp->rfbuf_cnt > ((so->so_rcv.sb_hiwat / 2)/ 4 * 3) &&
		    so->so_rcv.sb_hiwat < V_tcp_autorcvbuf_max) {
			newsize = min((so->so_rcv.sb_hiwat + (so->so_rcv.sb_hiwat/2)), V_tcp_autorcvbuf_max);
		}
		TCP_PROBE6(receive__autoresize, NULL, tp, m, tp, th, newsize);

		/* Start over with next RTT. */
		tp->rfbuf_ts = 0;
		tp->rfbuf_cnt = 0;
	} else
		tp->rfbuf_cnt += tlen;	/* add up */

	return (newsize);
}

int
tcp_input(struct mbuf **mp, int *offp, int proto)
{

	return(tcp_input_with_port(mp, offp, proto, 0));
}

void
tcp_handle_wakeup(struct tcpcb *tp, struct socket *so)
{
	/*
	 * Since tp might be gone if the session entered
	 * the TIME_WAIT state before coming here, we need
	 * to check if the socket is still connected.
	 */
	if (tp == NULL) {
		return;
	}
	if (so == NULL) {
		return;
	}
	INP_LOCK_ASSERT(tp->t_inpcb);
	if (tp->t_flags & TF_WAKESOR) {
		tp->t_flags &= ~TF_WAKESOR;
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		sorwakeup_locked(so);
	}
}

void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos)
{
	int thflags, acked, ourfinisacked, needoutput = 0, sack_changed;
	int rstreason, todrop, win, incforsyn = 0;
	uint32_t tiwin;
	uint16_t nsegs;
	char *s;
	struct in_conninfo *inc;
	struct mbuf *mfree;
	struct tcpopt to;
	int tfo_syn;
	u_int maxseg;
#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;
	inc = &tp->t_inpcb->inp_inc;
	tp->sackhint.last_sack_ack = 0;
	sack_changed = 0;
	nsegs = max(1, m->m_pkthdr.lro_nsegs);

	NET_EPOCH_ASSERT();
	INP_WLOCK_ASSERT(tp->t_inpcb);
1557 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
1559 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
1563 /* Save segment, if requested. */
1564 tcp_pcap_add(th, m, &(tp->t_inpkts));
1566 TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
	if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: "
			    "SYN|FIN segment ignored (based on "
			    "sysctl setting)\n", s, __func__);
			free(s, M_TCPLOG);
		}
		goto drop;
	}

	/*
	 * If a segment with the ACK-bit set arrives in the SYN-SENT state
	 * check SEQ.ACK first.
	 */
	if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
	    (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
		rstreason = BANDLIM_UNLIMITED;
		goto dropwithreset;
	}

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	tp->t_rcvtime = ticks;

	/*
	 * Scale up the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;
#ifdef STATS
	stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
#endif

	/*
	 * TCP ECN processing.
	 */
	if (tp->t_flags2 & TF2_ECN_PERMIT) {
		if (thflags & TH_CWR) {
			tp->t_flags2 &= ~TF2_ECN_SND_ECE;
			tp->t_flags |= TF_ACKNOW;
		}
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->t_flags2 |= TF2_ECN_SND_ECE;
			TCPSTAT_INC(tcps_ecn_ce);
			break;
		case IPTOS_ECN_ECT0:
			TCPSTAT_INC(tcps_ecn_ect0);
			break;
		case IPTOS_ECN_ECT1:
			TCPSTAT_INC(tcps_ecn_ect1);
			break;
		}

		/* Process a packet differently from RFC3168. */
		cc_ecnpkt_handler(tp, th, iptos);

		/* Congestion experienced. */
		if (thflags & TH_ECE) {
			cc_cong_signal(tp, th, CC_ECN);
		}
	}

	/*
	 * Parse options on any incoming segment.
	 */
1639 tcp_dooptions(&to, (u_char *)(th + 1),
1640 (th->th_off << 2) - sizeof(struct tcphdr),
1641 (thflags & TH_SYN) ? TO_SYN : 0);
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if ((tp->t_flags & TF_SIGNATURE) != 0 &&
	    (to.to_flags & TOF_SIGNATURE) == 0) {
		TCPSTAT_INC(tcps_sig_err_sigopt);
		/* XXX: should drop? */
	}
#endif
	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
			to.to_tsecr = 0;
		else if (tp->t_flags & TF_PREVVALID &&
		    tp->t_badrxtwin != 0 && SEQ_LT(to.to_tsecr, tp->t_badrxtwin))
			cc_cong_signal(tp, th, CC_RTO_ERR);
	}
	/*
	 * Process options only when we get SYN/ACK back. The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
1671 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
1672 /* Handle parallel SYN for ECN */
1673 if (!(thflags & TH_ACK) &&
1674 ((thflags & (TH_CWR | TH_ECE)) == (TH_CWR | TH_ECE)) &&
1675 ((V_tcp_do_ecn == 1) || (V_tcp_do_ecn == 2))) {
1676 tp->t_flags2 |= TF2_ECN_PERMIT;
1677 tp->t_flags2 |= TF2_ECN_SND_ECE;
1678 TCPSTAT_INC(tcps_ecn_shs);
1680 if ((to.to_flags & TOF_SCALE) &&
1681 (tp->t_flags & TF_REQ_SCALE) &&
1682 !(tp->t_flags & TF_NOOPT)) {
1683 tp->t_flags |= TF_RCVD_SCALE;
1684 tp->snd_scale = to.to_wscale;
1686 tp->t_flags &= ~TF_REQ_SCALE;
1688 * Initial send window. It will be updated with
1689 * the next incoming segment to the scaled value.
1691 tp->snd_wnd = th->th_win;
1692 if ((to.to_flags & TOF_TS) &&
1693 (tp->t_flags & TF_REQ_TSTMP) &&
1694 !(tp->t_flags & TF_NOOPT)) {
1695 tp->t_flags |= TF_RCVD_TSTMP;
1696 tp->ts_recent = to.to_tsval;
1697 tp->ts_recent_age = tcp_ts_getticks();
1699 tp->t_flags &= ~TF_REQ_TSTMP;
1700 if (to.to_flags & TOF_MSS)
1701 tcp_mss(tp, to.to_mss);
1702 if ((tp->t_flags & TF_SACK_PERMIT) &&
1703 (!(to.to_flags & TOF_SACKPERM) ||
1704 (tp->t_flags & TF_NOOPT)))
1705 tp->t_flags &= ~TF_SACK_PERMIT;
1706 if (IS_FASTOPEN(tp->t_flags)) {
1707 if ((to.to_flags & TOF_FASTOPEN) &&
1708 !(tp->t_flags & TF_NOOPT)) {
1711 if (to.to_flags & TOF_MSS)
1714 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
1718 tcp_fastopen_update_cache(tp, mss,
1719 to.to_tfo_len, to.to_tfo_cookie);
1721 tcp_fastopen_disable_path(tp);
1726 * If timestamps were negotiated during SYN/ACK and a
1727 * segment without a timestamp is received, silently drop
1728 * the segment, unless it is a RST segment or missing timestamps are
1730 * See section 3.2 of RFC 7323.
1732 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
1733 if (((thflags & TH_RST) != 0) || V_tcp_tolerate_missing_ts) {
1734 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1735 log(LOG_DEBUG, "%s; %s: Timestamp missing, "
1736 "segment processed normally\n",
1741 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1742 log(LOG_DEBUG, "%s; %s: Timestamp missing, "
1743 "segment silently dropped\n", s, __func__);
1750 * If timestamps were not negotiated during SYN/ACK and a
1751 * segment with a timestamp is received, ignore the
1752 * timestamp and process the packet normally.
1753 * See section 3.2 of RFC 7323.
1755 if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
1756 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1757 log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
1758 "segment processed normally\n", s, __func__);
	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED first, it can only
	 * be TH_NEEDSYN.
	 */
1780 if (tp->t_state == TCPS_ESTABLISHED &&
1781 th->th_seq == tp->rcv_nxt &&
1782 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
1783 tp->snd_nxt == tp->snd_max &&
1784 tiwin && tiwin == tp->snd_wnd &&
1785 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
1787 ((to.to_flags & TOF_TS) == 0 ||
1788 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {
1790 * If last ACK falls within this segment's sequence numbers,
1791 * record the timestamp.
1792 * NOTE that the test is modified according to the latest
1793 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1795 if ((to.to_flags & TOF_TS) != 0 &&
1796 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
1797 tp->ts_recent_age = tcp_ts_getticks();
1798 tp->ts_recent = to.to_tsval;
1802 if (SEQ_GT(th->th_ack, tp->snd_una) &&
1803 SEQ_LEQ(th->th_ack, tp->snd_max) &&
1804 !IN_RECOVERY(tp->t_flags) &&
1805 (to.to_flags & TOF_SACK) == 0 &&
1806 TAILQ_EMPTY(&tp->snd_holes)) {
1808 * This is a pure ack for outstanding data.
1810 TCPSTAT_INC(tcps_predack);
1813 * "bad retransmit" recovery without timestamps.
1815 if ((to.to_flags & TOF_TS) == 0 &&
1816 tp->t_rxtshift == 1 &&
1817 tp->t_flags & TF_PREVVALID &&
1818 (int)(ticks - tp->t_badrxtwin) < 0) {
1819 cc_cong_signal(tp, th, CC_RTO_ERR);
1823 * Recalculate the transmit timer / rtt.
1825 * Some boxes send broken timestamp replies
1826 * during the SYN+ACK phase, ignore
1827 * timestamps of 0 or we could calculate a
1828 * huge RTT and blow up the retransmit timer.
1830 if ((to.to_flags & TOF_TS) != 0 &&
1834 t = tcp_ts_getticks() - to.to_tsecr;
1835 if (!tp->t_rttlow || tp->t_rttlow > t)
1838 TCP_TS_TO_TICKS(t) + 1);
1839 } else if (tp->t_rtttime &&
1840 SEQ_GT(th->th_ack, tp->t_rtseq)) {
1841 if (!tp->t_rttlow ||
1842 tp->t_rttlow > ticks - tp->t_rtttime)
1843 tp->t_rttlow = ticks - tp->t_rtttime;
1845 ticks - tp->t_rtttime);
1847 acked = BYTES_THIS_ACK(tp, th);
1850 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
1851 hhook_run_tcp_est_in(tp, th, &to);
1854 TCPSTAT_ADD(tcps_rcvackpack, nsegs);
1855 TCPSTAT_ADD(tcps_rcvackbyte, acked);
1856 sbdrop(&so->so_snd, acked);
1857 if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
1858 SEQ_LEQ(th->th_ack, tp->snd_recover))
1859 tp->snd_recover = th->th_ack - 1;
1862 * Let the congestion control algorithm update
1863 * congestion control related information. This
1864 * typically means increasing the congestion
1867 cc_ack_received(tp, th, nsegs, CC_ACK);
1869 tp->snd_una = th->th_ack;
1871 * Pull snd_wl2 up to prevent seq wrap relative
1874 tp->snd_wl2 = th->th_ack;
1879 * If all outstanding data are acked, stop
1880 * retransmit timer, otherwise restart timer
1881 * using current (possibly backed-off) value.
1882 * If process is waiting for space,
1883 * wakeup/selwakeup/signal. If data
1884 * are ready to send, let tcp_output
1885 * decide between more output or persist.
1888 if (so->so_options & SO_DEBUG)
1889 tcp_trace(TA_INPUT, ostate, tp,
1890 (void *)tcp_saveipgen,
1893 TCP_PROBE3(debug__input, tp, th, m);
1894 if (tp->snd_una == tp->snd_max)
1895 tcp_timer_activate(tp, TT_REXMT, 0);
1896 else if (!tcp_timer_active(tp, TT_PERSIST))
1897 tcp_timer_activate(tp, TT_REXMT,
1900 if (sbavail(&so->so_snd))
1901 (void) tp->t_fb->tfb_tcp_output(tp);
1904 } else if (th->th_ack == tp->snd_una &&
1905 tlen <= sbspace(&so->so_rcv)) {
1906 int newsize = 0; /* automatic sockbuf scaling */
1909 * This is a pure, in-sequence data packet with
1910 * nothing on the reassembly queue and we have enough
1911 * buffer space to take it.
1913 /* Clean receiver SACK report if present */
1914 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
1915 tcp_clean_sackreport(tp);
1916 TCPSTAT_INC(tcps_preddat);
1917 tp->rcv_nxt += tlen;
1919 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
1920 (tp->t_fbyte_in == 0)) {
1921 tp->t_fbyte_in = ticks;
1922 if (tp->t_fbyte_in == 0)
1924 if (tp->t_fbyte_out && tp->t_fbyte_in)
1925 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
1928 * Pull snd_wl1 up to prevent seq wrap relative to
1931 tp->snd_wl1 = th->th_seq;
1933 * Pull rcv_up up to prevent seq wrap relative to
1936 tp->rcv_up = tp->rcv_nxt;
1937 TCPSTAT_ADD(tcps_rcvpack, nsegs);
1938 TCPSTAT_ADD(tcps_rcvbyte, tlen);
1940 if (so->so_options & SO_DEBUG)
1941 tcp_trace(TA_INPUT, ostate, tp,
1942 (void *)tcp_saveipgen, &tcp_savetcp, 0);
1944 TCP_PROBE3(debug__input, tp, th, m);
1946 newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
1948 /* Add data to socket buffer. */
1949 SOCKBUF_LOCK(&so->so_rcv);
1950 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1954 * Set new socket buffer size.
1955 * Give up when limit is reached.
1958 if (!sbreserve_locked(&so->so_rcv,
1960 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
1961 m_adj(m, drop_hdrlen); /* delayed header drop */
1962 sbappendstream_locked(&so->so_rcv, m, 0);
1964 /* NB: sorwakeup_locked() does an implicit unlock. */
1965 sorwakeup_locked(so);
1966 if (DELAY_ACK(tp, tlen)) {
1967 tp->t_flags |= TF_DELACK;
1969 tp->t_flags |= TF_ACKNOW;
1970 tp->t_fb->tfb_tcp_output(tp);
1977 * Calculate amount of space in receive window,
1978 * and then do TCP input processing.
1979 * Receive window is amount of space in rcv queue,
1980 * but not less than advertised window.
1982 win = sbspace(&so->so_rcv);
1985 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
1987 switch (tp->t_state) {
1989 * If the state is SYN_RECEIVED:
1990 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1992 case TCPS_SYN_RECEIVED:
1993 if ((thflags & TH_ACK) &&
1994 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1995 SEQ_GT(th->th_ack, tp->snd_max))) {
1996 rstreason = BANDLIM_RST_OPENPORT;
1999 if (IS_FASTOPEN(tp->t_flags)) {
2001 * When a TFO connection is in SYN_RECEIVED, the
2002 * only valid packets are the initial SYN, a
2003 * retransmit/copy of the initial SYN (possibly with
2004 * a subset of the original data), a valid ACK, a
2007 if ((thflags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) {
2008 rstreason = BANDLIM_RST_OPENPORT;
2010 } else if (thflags & TH_SYN) {
2011 /* non-initial SYN is ignored */
2012 if ((tcp_timer_active(tp, TT_DELACK) ||
2013 tcp_timer_active(tp, TT_REXMT)))
2015 } else if (!(thflags & (TH_ACK|TH_FIN|TH_RST))) {
2022 * If the state is SYN_SENT:
2023 * if seg contains a RST with valid ACK (SEQ.ACK has already
2024 * been verified), then drop the connection.
2025 * if seg contains a RST without an ACK, drop the seg.
2026 * if seg does not contain SYN, then drop the seg.
2027 * Otherwise this is an acceptable SYN segment
2028 * initialize tp->rcv_nxt and tp->irs
2029 * if seg contains ack then advance tp->snd_una
2030 * if seg contains an ECE and ECN support is enabled, the stream
2032 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
2033 * arrange for segment to be acked (eventually)
2034 * continue processing rest of data/controls, beginning with URG
2037 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) {
2038 TCP_PROBE5(connect__refused, NULL, tp,
2040 tp = tcp_drop(tp, ECONNREFUSED);
2042 if (thflags & TH_RST)
2044 if (!(thflags & TH_SYN))
2047 tp->irs = th->th_seq;
2049 if (thflags & TH_ACK) {
2050 int tfo_partial_ack = 0;
2052 TCPSTAT_INC(tcps_connects);
2055 mac_socketpeer_set_from_mbuf(m, so);
2057 /* Do window scaling on this connection? */
2058 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2059 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2060 tp->rcv_scale = tp->request_r_scale;
2062 tp->rcv_adv += min(tp->rcv_wnd,
2063 TCP_MAXWIN << tp->rcv_scale);
2064 tp->snd_una++; /* SYN is acked */
2066 * If not all the data that was sent in the TFO SYN
2067 * has been acked, resend the remainder right away.
2069 if (IS_FASTOPEN(tp->t_flags) &&
2070 (tp->snd_una != tp->snd_max)) {
2071 tp->snd_nxt = th->th_ack;
2072 tfo_partial_ack = 1;
2075 * If there's data, delay ACK; if there's also a FIN
2076 * ACKNOW will be turned on later.
2078 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial_ack)
2079 tcp_timer_activate(tp, TT_DELACK,
2082 tp->t_flags |= TF_ACKNOW;
2084 if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) &&
2085 (V_tcp_do_ecn == 1)) {
2086 tp->t_flags2 |= TF2_ECN_PERMIT;
2087 TCPSTAT_INC(tcps_ecn_shs);
2091 * Received <SYN,ACK> in SYN_SENT[*] state.
2093 * SYN_SENT --> ESTABLISHED
2094 * SYN_SENT* --> FIN_WAIT_1
2096 tp->t_starttime = ticks;
2097 if (tp->t_flags & TF_NEEDFIN) {
2098 tcp_state_change(tp, TCPS_FIN_WAIT_1);
2099 tp->t_flags &= ~TF_NEEDFIN;
2102 tcp_state_change(tp, TCPS_ESTABLISHED);
2103 TCP_PROBE5(connect__established, NULL, tp,
2106 tcp_timer_activate(tp, TT_KEEP,
2111 * Received initial SYN in SYN-SENT[*] state =>
2112 * simultaneous open.
2113 * If it succeeds, connection is * half-synchronized.
2114 * Otherwise, do 3-way handshake:
2115 * SYN-SENT -> SYN-RECEIVED
2116 * SYN-SENT* -> SYN-RECEIVED*
2118 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
2119 tcp_timer_activate(tp, TT_REXMT, 0);
2120 tcp_state_change(tp, TCPS_SYN_RECEIVED);
2123 INP_WLOCK_ASSERT(tp->t_inpcb);
2126 * Advance th->th_seq to correspond to first data byte.
2127 * If data, trim to stay within window,
2128 * dropping FIN if necessary.
2131 if (tlen > tp->rcv_wnd) {
2132 todrop = tlen - tp->rcv_wnd;
2136 TCPSTAT_INC(tcps_rcvpackafterwin);
2137 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2139 tp->snd_wl1 = th->th_seq - 1;
2140 tp->rcv_up = th->th_seq;
2142 * Client side of transaction: already sent SYN and data.
2143 * If the remote host used T/TCP to validate the SYN,
2144 * our data will be ACK'd; if so, enter normal data segment
2145 * processing in the middle of step 5, ack processing.
2146 * Otherwise, goto step 6.
2148 if (thflags & TH_ACK)
2154 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
2155 * do normal processing.
2157 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later.
2161 break; /* continue normal processing */
2165 * States other than LISTEN or SYN_SENT.
2166 * First check the RST flag and sequence number since reset segments
2167 * are exempt from the timestamp and connection count tests. This
2168 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
2169 * below which allowed reset segments in half the sequence space
2170 * to fall through and be processed (which gives forged reset
2171 * segments with a random sequence number a 50 percent chance of
2172 * killing a connection).
2173 * Then check timestamp, if present.
2174 * Then check the connection count, if present.
2175 * Then check that at least some bytes of segment are within
2176 * receive window. If segment begins before rcv_nxt,
2177 * drop leading data (and SYN); if nothing left, just ack.
2179 if (thflags & TH_RST) {
2181 * RFC5961 Section 3.2
2183 * - RST drops connection only if SEG.SEQ == RCV.NXT.
2184 * - If RST is in window, we send challenge ACK.
2186 * Note: to take into account delayed ACKs, we should
2187 * test against last_ack_sent instead of rcv_nxt.
2188 * Note 2: we handle special case of closed window, not
2189 * covered by the RFC.
2191 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
2192 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
2193 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) {
2194 KASSERT(tp->t_state != TCPS_SYN_SENT,
2195 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p",
2198 if (V_tcp_insecure_rst ||
2199 tp->last_ack_sent == th->th_seq) {
2200 TCPSTAT_INC(tcps_drops);
2201 /* Drop the connection. */
2202 switch (tp->t_state) {
2203 case TCPS_SYN_RECEIVED:
2204 so->so_error = ECONNREFUSED;
2206 case TCPS_ESTABLISHED:
2207 case TCPS_FIN_WAIT_1:
2208 case TCPS_FIN_WAIT_2:
2209 case TCPS_CLOSE_WAIT:
2212 so->so_error = ECONNRESET;
2219 TCPSTAT_INC(tcps_badrst);
2220 /* Send challenge ACK. */
2221 tcp_respond(tp, mtod(m, void *), th, m,
2222 tp->rcv_nxt, tp->snd_nxt, TH_ACK);
2223 tp->last_ack_sent = tp->rcv_nxt;
2231 * RFC5961 Section 4.2
2232 * Send challenge ACK for any SYN in synchronized state.
2234 if ((thflags & TH_SYN) && tp->t_state != TCPS_SYN_SENT &&
2235 tp->t_state != TCPS_SYN_RECEIVED) {
2236 TCPSTAT_INC(tcps_badsyn);
2237 if (V_tcp_insecure_syn &&
2238 SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
2239 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
2240 tp = tcp_drop(tp, ECONNRESET);
2241 rstreason = BANDLIM_UNLIMITED;
2243 /* Send challenge ACK. */
2244 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt,
2245 tp->snd_nxt, TH_ACK);
2246 tp->last_ack_sent = tp->rcv_nxt;
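/*
 * In short, the RFC5961 handling above replaces "tear down on any
 * in-window RST or SYN" with challenge ACKs: an RST whose sequence
 * number exactly matches last_ack_sent (or, with the insecure_rst
 * knob, any in-window RST) drops the connection; any other in-window
 * RST only elicits a challenge ACK; and a SYN on a synchronized
 * connection likewise elicits a challenge ACK, forcing a legitimate
 * peer to respond with an acceptable RST.
 */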
2253 * RFC 1323 PAWS: If we have a timestamp reply on this segment
2254 * and it's less than ts_recent, drop it.
2256 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
2257 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
2258 /* Check to see if ts_recent is over 24 days old. */
2259 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
2261 * Invalidate ts_recent. If this segment updates
2262 * ts_recent, the age will be reset later and ts_recent
2263 * will get a valid value. If it does not, setting
2264 * ts_recent to zero will at least satisfy the
2265 * requirement that zero be placed in the timestamp
2266 * echo reply when ts_recent isn't valid. The
2267 * age isn't reset until we get a valid ts_recent
2268 * because we don't want out-of-order segments to be
2269 * dropped when ts_recent is old.
2273 TCPSTAT_INC(tcps_rcvduppack);
2274 TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
2275 TCPSTAT_INC(tcps_pawsdrop);
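/*
 * Worked example: with ts_recent == 5000, a segment carrying a
 * TSval (to_tsval) of 4000 fails the PAWS test and is counted and
 * dropped here (a duplicate ACK is still generated), unless
 * ts_recent_age shows ts_recent to be more than TCP_PAWS_IDLE
 * (about 24 days) old, in which case ts_recent is invalidated and
 * the segment is processed normally.
 */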
2283 * In the SYN-RECEIVED state, validate that the packet belongs to
2284 * this connection before trimming the data to fit the receive
2285 * window. Check the sequence number versus IRS since we know
2286 * the sequence numbers haven't wrapped. This is a partial fix
2287 * for the "LAND" DoS attack.
2289 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
2290 rstreason = BANDLIM_RST_OPENPORT;
2294 todrop = tp->rcv_nxt - th->th_seq;
2296 if (thflags & TH_SYN) {
2306 * Following if statement from Stevens, vol. 2, p. 960.
2308 if (todrop > tlen
2309 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
2311 * Any valid FIN must be to the left of the window.
2312 * At this point the FIN must be a duplicate or out
2313 * of sequence; drop it.
2318 * Send an ACK to resynchronize and drop any data.
2319 * But keep on processing for RST or ACK.
2321 tp->t_flags |= TF_ACKNOW;
2323 TCPSTAT_INC(tcps_rcvduppack);
2324 TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
2326 TCPSTAT_INC(tcps_rcvpartduppack);
2327 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
2330 * DSACK - add SACK block for dropped range
2332 if ((todrop > 0) && (tp->t_flags & TF_SACK_PERMIT)) {
2333 tcp_update_sack_list(tp, th->th_seq,
2334 th->th_seq + todrop);
2336 * ACK now, as the next in-sequence segment
2337 * will clear the DSACK block again
2339 tp->t_flags |= TF_ACKNOW;
2341 drop_hdrlen += todrop; /* drop from the top afterwards */
2342 th->th_seq += todrop;
2344 if (th->th_urp > todrop)
2345 th->th_urp -= todrop;
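/*
 * Worked example: if rcv_nxt is 1000 and a segment arrives with
 * th_seq 900 and tlen 300, todrop is 100: a DSACK block covering
 * [900, 1000) is recorded (when SACK is permitted), th_seq is
 * advanced to 1000, and drop_hdrlen grows by 100 so the duplicate
 * bytes are trimmed later by m_adj().
 */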
2353 * If new data are received on a connection after the
2354 * user processes are gone, then RST the other end.
2356 if ((so->so_state & SS_NOFDREF) &&
2357 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
2358 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
2359 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data "
2360 "after socket was closed, "
2361 "sending RST and removing tcpcb\n",
2362 s, __func__, tcpstates[tp->t_state], tlen);
2366 TCPSTAT_INC(tcps_rcvafterclose);
2367 rstreason = BANDLIM_UNLIMITED;
2372 * If segment ends after window, drop trailing data
2373 * (and PUSH and FIN); if nothing left, just ACK.
2375 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
2377 TCPSTAT_INC(tcps_rcvpackafterwin);
2378 if (todrop >= tlen) {
2379 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
2381 * If window is closed can only take segments at
2382 * window edge, and have to drop data and PUSH from
2383 * incoming segments. Continue processing, but
2384 * remember to ack. Otherwise, drop segment
2385 * and ack.
2387 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
2388 tp->t_flags |= TF_ACKNOW;
2389 TCPSTAT_INC(tcps_rcvwinprobe);
2393 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2396 thflags &= ~(TH_PUSH|TH_FIN);
2400 * If last ACK falls within this segment's sequence numbers,
2401 * record its timestamp.
2402 * NOTE:
2403 * 1) That the test incorporates suggestions from the latest
2404 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
2405 * 2) That updating only on newer timestamps interferes with
2406 * our earlier PAWS tests, so this check should be solely
2407 * predicated on the sequence space of this segment.
2408 * 3) That we modify the segment boundary check to be
2409 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
2410 * instead of RFC1323's
2411 * Last.ACK.Sent < SEG.SEQ + SEG.Len,
2412 * This modified check allows us to overcome RFC1323's
2413 * limitations as described in Stevens TCP/IP Illustrated
2414 * Vol. 2 p.869. In such cases, we can still calculate the
2415 * RTT correctly when RCV.NXT == Last.ACK.Sent.
2417 if ((to.to_flags & TOF_TS) != 0 &&
2418 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
2419 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
2420 ((thflags & (TH_SYN|TH_FIN)) != 0))) {
2421 tp->ts_recent_age = tcp_ts_getticks();
2422 tp->ts_recent = to.to_tsval;
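/*
 * Worked example: with last_ack_sent == 100, a pure ACK with
 * th_seq == 100 and tlen == 0 satisfies the relaxed check above
 * (SEG.SEQ <= Last.ACK.Sent <= SEG.SEQ + SEG.Len), so its timestamp
 * still refreshes ts_recent; RFC1323's strict "<" form would have
 * rejected it and cost us an RTT sample.
 */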
2426 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
2427 * flag is on (half-synchronized state), then queue data for
2428 * later processing; else drop segment and return.
2430 if ((thflags & TH_ACK) == 0) {
2431 if (tp->t_state == TCPS_SYN_RECEIVED ||
2432 (tp->t_flags & TF_NEEDSYN)) {
2433 if (tp->t_state == TCPS_SYN_RECEIVED &&
2434 IS_FASTOPEN(tp->t_flags)) {
2435 tp->snd_wnd = tiwin;
2439 } else if (tp->t_flags & TF_ACKNOW)
2448 switch (tp->t_state) {
2450 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
2451 * ESTABLISHED state and continue processing.
2452 * The ACK was checked above.
2454 case TCPS_SYN_RECEIVED:
2456 TCPSTAT_INC(tcps_connects);
2458 /* Do window scaling? */
2459 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2460 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2461 tp->rcv_scale = tp->request_r_scale;
2463 tp->snd_wnd = tiwin;
2466 * SYN-RECEIVED -> ESTABLISHED
2467 * SYN-RECEIVED* -> FIN-WAIT-1
2469 tp->t_starttime = ticks;
2470 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) {
2471 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
2472 tp->t_tfo_pending = NULL;
2474 if (tp->t_flags & TF_NEEDFIN) {
2475 tcp_state_change(tp, TCPS_FIN_WAIT_1);
2476 tp->t_flags &= ~TF_NEEDFIN;
2478 tcp_state_change(tp, TCPS_ESTABLISHED);
2479 TCP_PROBE5(accept__established, NULL, tp,
2482 * TFO connections call cc_conn_init() during SYN
2483 * processing. Calling it again here for such
2484 * connections is not harmless as it would undo the
2485 * snd_cwnd reduction that occurs when a TFO SYN|ACK
2486 * is retransmitted.
2488 if (!IS_FASTOPEN(tp->t_flags))
2490 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
2493 * Account for the ACK of our SYN prior to
2494 * regular ACK processing below, except for
2495 * simultaneous SYN, which is handled later.
2497 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN))
2500 * If segment contains data or ACK, will call tcp_reass()
2501 * later; if not, do so now to pass queued data to user.
2503 if (tlen == 0 && (thflags & TH_FIN) == 0) {
2504 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0,
2506 tcp_handle_wakeup(tp, so);
2508 tp->snd_wl1 = th->th_seq - 1;
2512 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
2513 * ACKs. If the ack is in the range
2514 * tp->snd_una < th->th_ack <= tp->snd_max
2515 * then advance tp->snd_una to th->th_ack and drop
2516 * data from the retransmission queue. If this ACK reflects
2517 * more up to date window information we update our window information.
2519 case TCPS_ESTABLISHED:
2520 case TCPS_FIN_WAIT_1:
2521 case TCPS_FIN_WAIT_2:
2522 case TCPS_CLOSE_WAIT:
2525 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2526 TCPSTAT_INC(tcps_rcvacktoomuch);
2529 if ((tp->t_flags & TF_SACK_PERMIT) &&
2530 ((to.to_flags & TOF_SACK) ||
2531 !TAILQ_EMPTY(&tp->snd_holes))) {
2532 if (((sack_changed = tcp_sack_doack(tp, &to, th->th_ack)) != 0) &&
2533 (tp->t_flags & TF_LRD)) {
2534 tcp_sack_lost_retransmission(tp, th);
2538 * Reset the value so that previous (valid) value
2539 * from the last ack with SACK doesn't get used.
2541 tp->sackhint.sacked_bytes = 0;
2544 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
2545 hhook_run_tcp_est_in(tp, th, &to);
2548 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
2549 maxseg = tcp_maxseg(tp);
2550 if (tlen == 0 &&
2551 (tiwin == tp->snd_wnd ||
2552 (tp->t_flags & TF_SACK_PERMIT))) {
2554 * If this is the first time we've seen a
2555 * FIN from the remote, this is not a
2556 * duplicate and it needs to be processed
2557 * normally. This happens during a
2558 * simultaneous close.
2560 if ((thflags & TH_FIN) &&
2561 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2565 TCPSTAT_INC(tcps_rcvdupack);
2567 * If we have outstanding data (other than
2568 * a window probe), this is a completely
2569 * duplicate ack (ie, window info didn't
2570 * change and FIN isn't set),
2571 * the ack is the biggest we've
2572 * seen and we've seen exactly our rexmt
2573 * threshold of them, assume a packet
2574 * has been dropped and retransmit it.
2575 * Kludge snd_nxt & the congestion
2576 * window so we send only this one
2577 * packet.
2579 * We know we're losing at the current
2580 * window size so do congestion avoidance
2581 * (set ssthresh to half the current window
2582 * and pull our congestion window back to
2583 * the new ssthresh).
2585 * Dup acks mean that packets have left the
2586 * network (they're now cached at the receiver)
2587 * so bump cwnd by the amount in the receiver
2588 * to keep a constant cwnd packets in the
2589 * network.
2591 * When using TCP ECN, notify the peer that
2592 * we reduced the cwnd.
2595 * Following 2 kinds of acks should not affect
2596 * dupack counting:
2597 * 1) Old acks
2598 * 2) Acks with SACK but without any new SACK
2599 * information in them. These could result from
2600 * any anomaly in the network like a switch
2601 * duplicating packets or a possible DoS attack.
2603 if (th->th_ack != tp->snd_una ||
2604 ((tp->t_flags & TF_SACK_PERMIT) &&
2605 (to.to_flags & TOF_SACK) &&
2608 else if (!tcp_timer_active(tp, TT_REXMT))
2610 else if (++tp->t_dupacks > tcprexmtthresh ||
2611 IN_FASTRECOVERY(tp->t_flags)) {
2612 cc_ack_received(tp, th, nsegs,
2615 IN_FASTRECOVERY(tp->t_flags)) {
2616 tcp_do_prr_ack(tp, th, &to);
2617 } else if ((tp->t_flags & TF_SACK_PERMIT) &&
2618 (to.to_flags & TOF_SACK) &&
2619 IN_FASTRECOVERY(tp->t_flags)) {
2623 * Compute the amount of data in flight first.
2624 * We can inject new data into the pipe iff
2625 * we have less than 1/2 the original window's
2626 * worth of data in flight.
2628 if (V_tcp_do_newsack)
2629 awnd = tcp_compute_pipe(tp);
2631 awnd = (tp->snd_nxt - tp->snd_fack) +
2632 tp->sackhint.sack_bytes_rexmit;
2634 if (awnd < tp->snd_ssthresh) {
2635 tp->snd_cwnd += maxseg;
2636 if (tp->snd_cwnd > tp->snd_ssthresh)
2637 tp->snd_cwnd = tp->snd_ssthresh;
2640 tp->snd_cwnd += maxseg;
2641 (void) tp->t_fb->tfb_tcp_output(tp);
2643 } else if (tp->t_dupacks == tcprexmtthresh ||
2644 (tp->t_flags & TF_SACK_PERMIT &&
2646 tp->sackhint.sacked_bytes >
2647 (tcprexmtthresh - 1) * maxseg)) {
2650 * Above is the RFC6675 trigger condition of
2651 * more than (dupthresh-1)*maxseg sacked data.
2652 * If the count of holes in the
2653 * scoreboard is >= dupthresh, we could
2654 * also enter loss recovery, but don't
2655 * have that value readily available.
2657 tp->t_dupacks = tcprexmtthresh;
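/*
 * Numeric example, assuming the default tcprexmtthresh of 3 and a
 * maxseg of 1460: loss recovery is entered either on the third
 * duplicate ACK or, via the SACK-based trigger above, as soon as
 * more than (3 - 1) * 1460 = 2920 bytes have been reported SACKed,
 * whichever happens first.
 */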
2658 tcp_seq onxt = tp->snd_nxt;
2661 * If we're doing sack, or prr, check
2662 * to see if we're already in sack
2663 * recovery. If we're not doing sack,
2664 * check to see if we're in newreno
2665 * recovery.
2667 if (V_tcp_do_prr ||
2668 (tp->t_flags & TF_SACK_PERMIT)) {
2669 if (IN_FASTRECOVERY(tp->t_flags)) {
2674 if (SEQ_LEQ(th->th_ack,
2680 /* Congestion signal before ack. */
2681 cc_cong_signal(tp, th, CC_NDUPACK);
2682 cc_ack_received(tp, th, nsegs,
2684 tcp_timer_activate(tp, TT_REXMT, 0);
2688 * snd_ssthresh is already updated by
2691 if ((tp->t_flags & TF_SACK_PERMIT) &&
2692 (to.to_flags & TOF_SACK)) {
2693 tp->sackhint.prr_delivered =
2694 tp->sackhint.sacked_bytes;
2696 tp->sackhint.prr_delivered =
2697 imin(tp->snd_max - tp->snd_una,
2698 imin(INT_MAX / 65536,
2699 tp->t_dupacks) * maxseg);
2701 tp->sackhint.recover_fs = max(1,
2702 tp->snd_nxt - tp->snd_una);
2704 if ((tp->t_flags & TF_SACK_PERMIT) &&
2705 (to.to_flags & TOF_SACK)) {
2707 tcps_sack_recovery_episode);
2708 tp->snd_recover = tp->snd_nxt;
2709 tp->snd_cwnd = maxseg;
2710 (void) tp->t_fb->tfb_tcp_output(tp);
2711 if (SEQ_GT(th->th_ack, tp->snd_una))
2712 goto resume_partialack;
2715 tp->snd_nxt = th->th_ack;
2716 tp->snd_cwnd = maxseg;
2717 (void) tp->t_fb->tfb_tcp_output(tp);
2718 KASSERT(tp->snd_limited <= 2,
2719 ("%s: tp->snd_limited too big",
2721 tp->snd_cwnd = tp->snd_ssthresh +
2723 (tp->t_dupacks - tp->snd_limited);
2724 if (SEQ_GT(onxt, tp->snd_nxt))
2727 } else if (V_tcp_do_rfc3042) {
2729 * Process first and second duplicate
2730 * ACKs. Each indicates a segment
2731 * leaving the network, creating room
2732 * for more. Make sure we can send a
2733 * packet on reception of each duplicate
2734 * ACK by increasing snd_cwnd by one
2735 * segment. Restore the original
2736 * snd_cwnd after packet transmission.
2738 cc_ack_received(tp, th, nsegs,
2740 uint32_t oldcwnd = tp->snd_cwnd;
2741 tcp_seq oldsndmax = tp->snd_max;
2745 KASSERT(tp->t_dupacks == 1 ||
2747 ("%s: dupacks not 1 or 2",
2749 if (tp->t_dupacks == 1)
2750 tp->snd_limited = 0;
2752 (tp->snd_nxt - tp->snd_una) +
2753 (tp->t_dupacks - tp->snd_limited) *
2756 * Only call tcp_output when there
2757 * is new data available to be sent.
2758 * Otherwise we would send pure ACKs.
2760 SOCKBUF_LOCK(&so->so_snd);
2761 avail = sbavail(&so->so_snd) -
2762 (tp->snd_nxt - tp->snd_una);
2763 SOCKBUF_UNLOCK(&so->so_snd);
2765 (void) tp->t_fb->tfb_tcp_output(tp);
2766 sent = tp->snd_max - oldsndmax;
2767 if (sent > maxseg) {
2768 KASSERT((tp->t_dupacks == 2 &&
2769 tp->snd_limited == 0) ||
2770 (sent == maxseg + 1 &&
2771 tp->t_flags & TF_SENTFIN),
2772 ("%s: sent too much",
2774 tp->snd_limited = 2;
2775 } else if (sent > 0)
2777 tp->snd_cwnd = oldcwnd;
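/*
 * This is RFC3042 limited transmit: on the first and second
 * duplicate ACK the congestion window is temporarily inflated by
 * just enough to release one previously unsent segment (snd_cwnd is
 * restored right above). With maxseg 1460 and fresh data queued,
 * each of the two dupacks can therefore put one new 1460-byte
 * segment on the wire without entering loss recovery.
 */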
2784 * This ack is advancing the left edge, reset the
2785 * counter.
2789 * If this ack also has new SACK info, increment the
2790 * counter as per rfc6675. The variable
2791 * sack_changed tracks all changes to the SACK
2792 * scoreboard, including when partial ACKs without
2793 * SACK options are received, and clear the scoreboard
2794 * from the left side. Such partial ACKs should not be
2795 * counted as dupacks here.
2797 if ((tp->t_flags & TF_SACK_PERMIT) &&
2798 (to.to_flags & TOF_SACK) &&
2801 /* limit overhead by setting maxseg last */
2802 if (!IN_FASTRECOVERY(tp->t_flags) &&
2803 (tp->sackhint.sacked_bytes >
2804 ((tcprexmtthresh - 1) *
2805 (maxseg = tcp_maxseg(tp))))) {
2806 goto enter_recovery;
2812 KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
2813 ("%s: th_ack <= snd_una", __func__));
2816 * If the congestion window was inflated to account
2817 * for the other side's cached packets, retract it.
2819 if (IN_FASTRECOVERY(tp->t_flags)) {
2820 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2821 if (tp->t_flags & TF_SACK_PERMIT)
2822 if (V_tcp_do_prr && to.to_flags & TOF_SACK) {
2823 tcp_timer_activate(tp, TT_REXMT, 0);
2825 tcp_do_prr_ack(tp, th, &to);
2826 tp->t_flags |= TF_ACKNOW;
2827 (void) tcp_output(tp);
2829 tcp_sack_partialack(tp, th);
2831 tcp_newreno_partial_ack(tp, th);
2833 cc_post_recovery(tp, th);
2834 } else if (IN_CONGRECOVERY(tp->t_flags)) {
2835 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2837 tp->sackhint.delivered_data = BYTES_THIS_ACK(tp, th);
2838 tp->snd_fack = th->th_ack;
2839 tcp_do_prr_ack(tp, th, &to);
2840 (void) tcp_output(tp);
2843 cc_post_recovery(tp, th);
2846 * If we reach this point, ACK is not a duplicate,
2847 * i.e., it ACKs something we sent.
2849 if (tp->t_flags & TF_NEEDSYN) {
2851 * T/TCP: Connection was half-synchronized, and our
2852 * SYN has been ACK'd (so connection is now fully
2853 * synchronized). Go to non-starred state,
2854 * increment snd_una for ACK of SYN, and check if
2855 * we can do window scaling.
2857 tp->t_flags &= ~TF_NEEDSYN;
2859 /* Do window scaling? */
2860 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2861 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2862 tp->rcv_scale = tp->request_r_scale;
2863 /* Send window already scaled. */
2868 INP_WLOCK_ASSERT(tp->t_inpcb);
2871 * Adjust for the SYN bit in sequence space,
2872 * but don't account for it in cwnd calculations.
2873 * This is for the SYN_RECEIVED, non-simultaneous
2874 * SYN case. SYN_SENT and simultaneous SYN are
2875 * treated elsewhere.
2879 acked = BYTES_THIS_ACK(tp, th);
2880 KASSERT(acked >= 0, ("%s: acked unexpectedly negative "
2881 "(tp->snd_una=%u, th->th_ack=%u, tp=%p, m=%p)", __func__,
2882 tp->snd_una, th->th_ack, tp, m));
2883 TCPSTAT_ADD(tcps_rcvackpack, nsegs);
2884 TCPSTAT_ADD(tcps_rcvackbyte, acked);
2887 * If we just performed our first retransmit, and the ACK
2888 * arrives within our recovery window, then it was a mistake
2889 * to do the retransmit in the first place. Recover our
2890 * original cwnd and ssthresh, and proceed to transmit where
2891 * we left off.
2893 if (tp->t_rxtshift == 1 &&
2894 tp->t_flags & TF_PREVVALID &&
2896 SEQ_LT(to.to_tsecr, tp->t_badrxtwin))
2897 cc_cong_signal(tp, th, CC_RTO_ERR);
2900 * If we have a timestamp reply, update smoothed
2901 * round trip time. If no timestamp is present but
2902 * transmit timer is running and timed sequence
2903 * number was acked, update smoothed round trip time.
2904 * Since we now have an rtt measurement, cancel the
2905 * timer backoff (cf., Phil Karn's retransmit alg.).
2906 * Recompute the initial retransmit timer.
2908 * Some boxes send broken timestamp replies
2909 * during the SYN+ACK phase, ignore
2910 * timestamps of 0 or we could calculate a
2911 * huge RTT and blow up the retransmit timer.
2913 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) {
2916 t = tcp_ts_getticks() - to.to_tsecr;
2917 if (!tp->t_rttlow || tp->t_rttlow > t)
2919 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1);
2920 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2921 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
2922 tp->t_rttlow = ticks - tp->t_rtttime;
2923 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2927 * If all outstanding data is acked, stop retransmit
2928 * timer and remember to restart (more output or persist).
2929 * If there is more data to be acked, restart retransmit
2930 * timer, using current (possibly backed-off) value.
2932 if (th->th_ack == tp->snd_max) {
2933 tcp_timer_activate(tp, TT_REXMT, 0);
2935 } else if (!tcp_timer_active(tp, TT_PERSIST))
2936 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
2939 * If no data (only SYN) was ACK'd,
2940 * skip rest of ACK processing.
2946 * Let the congestion control algorithm update congestion
2947 * control related information. This typically means increasing
2948 * the congestion window.
2950 cc_ack_received(tp, th, nsegs, CC_ACK);
2952 SOCKBUF_LOCK(&so->so_snd);
2953 if (acked > sbavail(&so->so_snd)) {
2954 if (tp->snd_wnd >= sbavail(&so->so_snd))
2955 tp->snd_wnd -= sbavail(&so->so_snd);
2958 mfree = sbcut_locked(&so->so_snd,
2959 (int)sbavail(&so->so_snd));
2962 mfree = sbcut_locked(&so->so_snd, acked);
2963 if (tp->snd_wnd >= (uint32_t) acked)
2964 tp->snd_wnd -= acked;
2969 /* NB: sowwakeup_locked() does an implicit unlock. */
2970 sowwakeup_locked(so);
2972 /* Detect una wraparound. */
2973 if (!IN_RECOVERY(tp->t_flags) &&
2974 SEQ_GT(tp->snd_una, tp->snd_recover) &&
2975 SEQ_LEQ(th->th_ack, tp->snd_recover))
2976 tp->snd_recover = th->th_ack - 1;
2977 /* XXXLAS: Can this be moved up into cc_post_recovery? */
2978 if (IN_RECOVERY(tp->t_flags) &&
2979 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
2980 EXIT_RECOVERY(tp->t_flags);
2982 tp->snd_una = th->th_ack;
2983 if (tp->t_flags & TF_SACK_PERMIT) {
2984 if (SEQ_GT(tp->snd_una, tp->snd_recover))
2985 tp->snd_recover = tp->snd_una;
2987 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2988 tp->snd_nxt = tp->snd_una;
2990 switch (tp->t_state) {
2992 * In FIN_WAIT_1 STATE in addition to the processing
2993 * for the ESTABLISHED state if our FIN is now acknowledged
2994 * then enter FIN_WAIT_2.
2996 case TCPS_FIN_WAIT_1:
2997 if (ourfinisacked) {
2999 * If we can't receive any more
3000 * data, then closing user can proceed.
3001 * Starting the timer is contrary to the
3002 * specification, but if we don't get a FIN
3003 * we'll hang forever.
3006 * we should release the tp also, and use a
3007 * compressed state.
3009 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
3010 soisdisconnected(so);
3011 tcp_timer_activate(tp, TT_2MSL,
3012 (tcp_fast_finwait2_recycle ?
3013 tcp_finwait2_timeout :
3016 tcp_state_change(tp, TCPS_FIN_WAIT_2);
3021 * In CLOSING STATE in addition to the processing for
3022 * the ESTABLISHED state if the ACK acknowledges our FIN
3023 * then enter the TIME-WAIT state, otherwise ignore
3024 * the segment.
3026 case TCPS_CLOSING:
3027 if (ourfinisacked) {
3035 * In LAST_ACK, we may still be waiting for data to drain
3036 * and/or to be acked, as well as for the ack of our FIN.
3037 * If our FIN is now acknowledged, delete the TCB,
3038 * enter the closed state and return.
3041 if (ourfinisacked) {
3050 INP_WLOCK_ASSERT(tp->t_inpcb);
3053 * Update window information.
3054 * Don't look at window if no ACK: TAC's send garbage on first SYN.
3056 if ((thflags & TH_ACK) &&
3057 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
3058 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
3059 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
3060 /* keep track of pure window updates */
3062 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
3063 TCPSTAT_INC(tcps_rcvwinupd);
3064 tp->snd_wnd = tiwin;
3065 tp->snd_wl1 = th->th_seq;
3066 tp->snd_wl2 = th->th_ack;
3067 if (tp->snd_wnd > tp->max_sndwnd)
3068 tp->max_sndwnd = tp->snd_wnd;
3073 * Process segments with URG.
3075 if ((thflags & TH_URG) && th->th_urp &&
3076 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3078 * This is a kludge, but if we receive and accept
3079 * random urgent pointers, we'll crash in
3080 * soreceive. It's hard to imagine someone
3081 * actually wanting to send this much urgent data.
3083 SOCKBUF_LOCK(&so->so_rcv);
3084 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) {
3085 th->th_urp = 0; /* XXX */
3086 thflags &= ~TH_URG; /* XXX */
3087 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
3088 goto dodata; /* XXX */
3091 * If this segment advances the known urgent pointer,
3092 * then mark the data stream. This should not happen
3093 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
3094 * a FIN has been received from the remote side.
3095 * In these states we ignore the URG.
3097 * According to RFC961 (Assigned Protocols),
3098 * the urgent pointer points to the last octet
3099 * of urgent data. We continue, however,
3100 * to consider it to indicate the first octet
3101 * of data past the urgent section as the original
3102 * spec states (in one of two places).
3104 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
3105 tp->rcv_up = th->th_seq + th->th_urp;
3106 so->so_oobmark = sbavail(&so->so_rcv) +
3107 (tp->rcv_up - tp->rcv_nxt) - 1;
3108 if (so->so_oobmark == 0)
3109 so->so_rcv.sb_state |= SBS_RCVATMARK;
3111 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
3113 SOCKBUF_UNLOCK(&so->so_rcv);
3115 * Remove out of band data so it doesn't get presented to the user.
3116 * This can happen independent of advancing the URG pointer,
3117 * but if two URG's are pending at once, some out-of-band
3118 * data may creep in... ick.
3120 if (th->th_urp <= (uint32_t)tlen &&
3121 !(so->so_options & SO_OOBINLINE)) {
3122 /* hdr drop is delayed */
3123 tcp_pulloutofband(so, th, m, drop_hdrlen);
3127 * If no out of band data is expected,
3128 * pull receive urgent pointer along
3129 * with the receive window.
3131 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
3132 tp->rcv_up = tp->rcv_nxt;
3135 INP_WLOCK_ASSERT(tp->t_inpcb);
3138 * Process the segment text, merging it into the TCP sequencing queue,
3139 * and arranging for acknowledgment of receipt if necessary.
3140 * This process logically involves adjusting tp->rcv_wnd as data
3141 * is presented to the user (this happens in tcp_usrreq.c,
3142 * case PRU_RCVD). If a FIN has already been received on this
3143 * connection then we just ignore the text.
3145 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
3146 IS_FASTOPEN(tp->t_flags));
3147 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) &&
3148 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3149 tcp_seq save_start = th->th_seq;
3150 tcp_seq save_rnxt = tp->rcv_nxt;
3151 int save_tlen = tlen;
3152 m_adj(m, drop_hdrlen); /* delayed header drop */
3154 * Insert segment which includes th into TCP reassembly queue
3155 * with control block tp. Set thflags to whether reassembly now
3156 * includes a segment with FIN. This handles the common case
3157 * inline (segment is the next to be received on an established
3158 * connection, and the queue is empty), avoiding linkage into
3159 * and removal from the queue and repetition of various
3160 * conversions.
3161 * Set DELACK for segments received in order, but ack
3162 * immediately when segments are out of order (so
3163 * fast retransmit can work).
3165 if (th->th_seq == tp->rcv_nxt &&
3167 (TCPS_HAVEESTABLISHED(tp->t_state) ||
3169 if (DELAY_ACK(tp, tlen) || tfo_syn)
3170 tp->t_flags |= TF_DELACK;
3172 tp->t_flags |= TF_ACKNOW;
3173 tp->rcv_nxt += tlen;
3175 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
3176 (tp->t_fbyte_in == 0)) {
3177 tp->t_fbyte_in = ticks;
3178 if (tp->t_fbyte_in == 0)
3180 if (tp->t_fbyte_out && tp->t_fbyte_in)
3181 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
3183 thflags = th->th_flags & TH_FIN;
3184 TCPSTAT_INC(tcps_rcvpack);
3185 TCPSTAT_ADD(tcps_rcvbyte, tlen);
3186 SOCKBUF_LOCK(&so->so_rcv);
3187 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
3190 sbappendstream_locked(&so->so_rcv, m, 0);
3191 tp->t_flags |= TF_WAKESOR;
3194 * XXX: Due to the header drop above "th" is
3195 * theoretically invalid by now. Fortunately
3196 * m_adj() doesn't actually free any mbufs
3197 * when trimming from the head.
3199 tcp_seq temp = save_start;
3201 thflags = tcp_reass(tp, th, &temp, &tlen, m);
3202 tp->t_flags |= TF_ACKNOW;
3204 if ((tp->t_flags & TF_SACK_PERMIT) &&
3206 TCPS_HAVEESTABLISHED(tp->t_state)) {
3207 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
3209 * DSACK actually handled in the fastpath
3212 tcp_update_sack_list(tp, save_start,
3213 save_start + save_tlen);
3214 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
3215 if ((tp->rcv_numsacks >= 1) &&
3216 (tp->sackblks[0].end == save_start)) {
3218 * Partial overlap, recorded at todrop
3219 * above.
3221 tcp_update_sack_list(tp,
3222 tp->sackblks[0].start,
3223 tp->sackblks[0].end);
3225 tcp_update_dsack_list(tp, save_start,
3226 save_start + save_tlen);
3228 } else if (tlen >= save_tlen) {
3229 /* Update of sackblks. */
3230 tcp_update_dsack_list(tp, save_start,
3231 save_start + save_tlen);
3232 } else if (tlen > 0) {
3233 tcp_update_dsack_list(tp, save_start,
3237 tcp_handle_wakeup(tp, so);
3240 * Note the amount of data that peer has sent into
3241 * our window, in order to estimate the sender's
3242 * buffer size.
3245 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
3246 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
3248 len = so->so_rcv.sb_hiwat;
3256 * If FIN is received ACK the FIN and let the user know
3257 * that the connection is closing.
3259 if (thflags & TH_FIN) {
3260 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3261 /* The socket upcall is handled by socantrcvmore. */
3264 * If connection is half-synchronized
3265 * (ie NEEDSYN flag on) then delay ACK,
3266 * so it may be piggybacked when SYN is sent.
3267 * Otherwise, since we received a FIN then no
3268 * more input can be expected, send ACK now.
3270 if (tp->t_flags & TF_NEEDSYN)
3271 tp->t_flags |= TF_DELACK;
3273 tp->t_flags |= TF_ACKNOW;
3276 switch (tp->t_state) {
3278 * In SYN_RECEIVED and ESTABLISHED STATES
3279 * enter the CLOSE_WAIT state.
3281 case TCPS_SYN_RECEIVED:
3282 tp->t_starttime = ticks;
3284 case TCPS_ESTABLISHED:
3285 tcp_state_change(tp, TCPS_CLOSE_WAIT);
3289 * If still in FIN_WAIT_1 STATE FIN has not been acked so
3290 * enter the CLOSING state.
3292 case TCPS_FIN_WAIT_1:
3293 tcp_state_change(tp, TCPS_CLOSING);
3297 * In FIN_WAIT_2 state enter the TIME_WAIT state,
3298 * starting the time-wait timer, turning off the other
3299 * standard timers.
3301 case TCPS_FIN_WAIT_2:
3307 if (so->so_options & SO_DEBUG)
3308 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
3311 TCP_PROBE3(debug__input, tp, th, m);
3314 * Return any desired output.
3316 if (needoutput || (tp->t_flags & TF_ACKNOW))
3317 (void) tp->t_fb->tfb_tcp_output(tp);
3320 INP_WLOCK_ASSERT(tp->t_inpcb);
3322 if (tp->t_flags & TF_DELACK) {
3323 tp->t_flags &= ~TF_DELACK;
3324 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
3326 INP_WUNLOCK(tp->t_inpcb);
3331 * Generate an ACK dropping incoming segment if it occupies
3332 * sequence space, where the ACK reflects our state.
3334 * We can now skip the test for the RST flag since all
3335 * paths to this code happen after packets containing
3336 * RST have been dropped.
3338 * In the SYN-RECEIVED state, don't send an ACK unless the
3339 * segment we received passes the SYN-RECEIVED ACK test.
3340 * If it fails send a RST. This breaks the loop in the
3341 * "LAND" DoS attack, and also prevents an ACK storm
3342 * between two listening ports that have been sent forged
3343 * SYN segments, each with the source address of the other.
3345 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
3346 (SEQ_GT(tp->snd_una, th->th_ack) ||
3347 SEQ_GT(th->th_ack, tp->snd_max)) ) {
3348 rstreason = BANDLIM_RST_OPENPORT;
3352 if (so->so_options & SO_DEBUG)
3353 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3356 TCP_PROBE3(debug__input, tp, th, m);
3357 tp->t_flags |= TF_ACKNOW;
3358 (void) tp->t_fb->tfb_tcp_output(tp);
3359 INP_WUNLOCK(tp->t_inpcb);
3365 tcp_dropwithreset(m, th, tp, tlen, rstreason);
3366 INP_WUNLOCK(tp->t_inpcb);
3368 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
3373 * Drop space held by incoming segment and return.
3376 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
3377 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3380 TCP_PROBE3(debug__input, tp, th, m);
3382 INP_WUNLOCK(tp->t_inpcb);
3388 * Issue RST and make ACK acceptable to originator of segment.
3389 * The mbuf must still include the original packet header.
3393 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
3394 int tlen, int rstreason)
3400 struct ip6_hdr *ip6;
3404 INP_LOCK_ASSERT(tp->t_inpcb);
3407 /* Don't bother if destination was broadcast/multicast. */
3408 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
3411 if (mtod(m, struct ip *)->ip_v == 6) {
3412 ip6 = mtod(m, struct ip6_hdr *);
3413 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3414 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3416 /* IPv6 anycast check is done at tcp6_input() */
3419 #if defined(INET) && defined(INET6)
3424 ip = mtod(m, struct ip *);
3425 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3426 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3427 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3428 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3433 /* Perform bandwidth limiting. */
3434 if (badport_bandlim(rstreason) < 0)
3437 /* tcp_respond consumes the mbuf chain. */
3438 if (th->th_flags & TH_ACK) {
3439 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
3440 th->th_ack, TH_RST);
3442 if (th->th_flags & TH_SYN)
3444 if (th->th_flags & TH_FIN)
3446 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
3447 (tcp_seq)0, TH_RST|TH_ACK);
3455 * Parse TCP options and place in tcpopt.
3458 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
3463 for (; cnt > 0; cnt -= optlen, cp += optlen) {
3465 if (opt == TCPOPT_EOL)
3467 if (opt == TCPOPT_NOP)
3473 if (optlen < 2 || optlen > cnt)
3478 if (optlen != TCPOLEN_MAXSEG)
3480 if (!(flags & TO_SYN))
3482 to->to_flags |= TOF_MSS;
3483 bcopy((char *)cp + 2,
3484 (char *)&to->to_mss, sizeof(to->to_mss));
3485 to->to_mss = ntohs(to->to_mss);
3488 if (optlen != TCPOLEN_WINDOW)
3490 if (!(flags & TO_SYN))
3492 to->to_flags |= TOF_SCALE;
3493 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
3495 case TCPOPT_TIMESTAMP:
3496 if (optlen != TCPOLEN_TIMESTAMP)
3498 to->to_flags |= TOF_TS;
3499 bcopy((char *)cp + 2,
3500 (char *)&to->to_tsval, sizeof(to->to_tsval));
3501 to->to_tsval = ntohl(to->to_tsval);
3502 bcopy((char *)cp + 6,
3503 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
3504 to->to_tsecr = ntohl(to->to_tsecr);
3506 case TCPOPT_SIGNATURE:
3508 * In order to reply to a host which has set the
3509 * TCP_SIGNATURE option in its initial SYN, we have
3510 * to record the fact that the option was observed
3511 * here for the syncache code to perform the correct
3512 * response.
3514 if (optlen != TCPOLEN_SIGNATURE)
3516 to->to_flags |= TOF_SIGNATURE;
3517 to->to_signature = cp + 2;
3519 case TCPOPT_SACK_PERMITTED:
3520 if (optlen != TCPOLEN_SACK_PERMITTED)
3522 if (!(flags & TO_SYN))
3526 to->to_flags |= TOF_SACKPERM;
3529 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
3533 to->to_flags |= TOF_SACK;
3534 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
3535 to->to_sacks = cp + 2;
3536 TCPSTAT_INC(tcps_sack_rcv_blocks);
3538 case TCPOPT_FAST_OPEN:
3540 * Cookie length validation is performed by the
3541 * server side cookie checking code or the client
3542 * side cookie cache update code.
3544 if (!(flags & TO_SYN))
3546 if (!V_tcp_fastopen_client_enable &&
3547 !V_tcp_fastopen_server_enable)
3549 to->to_flags |= TOF_FASTOPEN;
3550 to->to_tfo_len = optlen - 2;
3551 to->to_tfo_cookie = to->to_tfo_len ? cp + 2 : NULL;
3560 * Pull out of band byte out of a segment so
3561 * it doesn't appear in the user's data queue.
3562 * It is still reflected in the segment length for
3563 * sequencing purposes.
3566 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
3569 int cnt = off + th->th_urp - 1;
3572 if (m->m_len > cnt) {
3573 char *cp = mtod(m, caddr_t) + cnt;
3574 struct tcpcb *tp = sototcpcb(so);
3576 INP_WLOCK_ASSERT(tp->t_inpcb);
3579 tp->t_oobflags |= TCPOOB_HAVEDATA;
3580 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
3582 if (m->m_flags & M_PKTHDR)
3591 panic("tcp_pulloutofband");
3595 * Collect new round-trip time estimate
3596 * and update averages and current timeout.
3599 tcp_xmit_timer(struct tcpcb *tp, int rtt)
3603 INP_WLOCK_ASSERT(tp->t_inpcb);
3605 TCPSTAT_INC(tcps_rttupdated);
3608 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT,
3609 imax(0, rtt * 1000 / hz));
3611 if ((tp->t_srtt != 0) && (tp->t_rxtshift <= TCP_RTT_INVALIDATE)) {
3613 * srtt is stored as fixed point with 5 bits after the
3614 * binary point (i.e., scaled by 32). The following magic
3615 * is equivalent to the smoothing algorithm in rfc793 with
3616 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
3617 * point). Adjust rtt to origin 0.
3619 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
3620 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
3622 if ((tp->t_srtt += delta) <= 0)
3626 * We accumulate a smoothed rtt variance (actually, a
3627 * smoothed mean difference), then set the retransmit
3628 * timer to smoothed rtt + 4 times the smoothed variance.
3629 * rttvar is stored as fixed point with 4 bits after the
3630 * binary point (scaled by 16). The following is
3631 * equivalent to rfc793 smoothing with an alpha of .75
3632 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
3633 * rfc793's wired-in beta.
3637 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
3638 if ((tp->t_rttvar += delta) <= 0)
3640 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
3641 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3644 * No rtt measurement yet - use the unsmoothed rtt.
3645 * Set the variance to half the rtt (so our first
3646 * retransmit happens at 3*rtt).
3648 tp->t_srtt = rtt << TCP_RTT_SHIFT;
3649 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
3650 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
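/*
 * Worked example for the first sample: with rtt == 10 ticks,
 * t_srtt holds 10 in srtt fixed point and t_rttvar holds 5 (half
 * the rtt, since the shift above is one less than the rttvar
 * scale), so the initial retransmit value srtt + 4 * rttvar works
 * out to 10 + 4 * 5 = 30 ticks, i.e. the 3*rtt noted above.
 */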
3656 * the retransmit should happen at rtt + 4 * rttvar.
3657 * Because of the way we do the smoothing, srtt and rttvar
3658 * will each average +1/2 tick of bias. When we compute
3659 * the retransmit timer, we want 1/2 tick of rounding and
3660 * 1 extra tick because of +-1/2 tick uncertainty in the
3661 * firing of the timer. The bias will give us exactly the
3662 * 1.5 tick we need. But, because the bias is
3663 * statistical, we have to test that we don't drop below
3664 * the minimum feasible timer (which is 2 ticks).
3666 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
3667 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
3670 * We received an ack for a packet that wasn't retransmitted;
3671 * it is probably safe to discard any error indications we've
3672 * received recently. This isn't quite right, but close enough
3673 * for now (a route might have failed after we sent a segment,
3674 * and the return path might not be symmetrical).
3676 tp->t_softerror = 0;
3680 * Determine a reasonable value for maxseg size.
3681 * If the route is known, check route for mtu.
3682 * If none, use an mss that can be handled on the outgoing interface
3683 * without forcing IP to fragment. If no route is found, route has no mtu,
3684 * or the destination isn't local, use a default, hopefully conservative
3685 * size (usually 512 or the default IP max size, but no more than the mtu
3686 * of the interface), as we can't discover anything about intervening
3687 * gateways or networks. We also initialize the congestion/slow start
3688 * window to be a single segment if the destination isn't local.
3689 * While looking at the routing entry, we also initialize other path-dependent
3690 * parameters from pre-set or cached values in the routing entry.
3692 * NOTE that resulting t_maxseg doesn't include space for TCP options or
3693 * IP options, e.g. IPSEC data, since length of this data may vary, and
3694 * thus it is calculated for every segment separately in tcp_output().
3696 * NOTE that this routine is only called when we process an incoming
3697 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS
3698 * settings are handled in tcp_mssopt().
3701 tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer,
3702 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap)
3705 uint32_t maxmtu = 0;
3706 struct inpcb *inp = tp->t_inpcb;
3707 struct hc_metrics_lite metrics;
3709 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
3710 size_t min_protoh = isipv6 ?
3711 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
3712 sizeof (struct tcpiphdr);
3714 size_t min_protoh = sizeof(struct tcpiphdr);
3717 INP_WLOCK_ASSERT(tp->t_inpcb);
3720 min_protoh += V_tcp_udp_tunneling_overhead;
3721 if (mtuoffer != -1) {
3722 KASSERT(offer == -1, ("%s: conflict", __func__));
3723 offer = mtuoffer - min_protoh;
3729 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap);
3730 tp->t_maxseg = V_tcp_v6mssdflt;
3733 #if defined(INET) && defined(INET6)
3738 maxmtu = tcp_maxmtu(&inp->inp_inc, cap);
3739 tp->t_maxseg = V_tcp_mssdflt;
3744 * No route to sender, stay with default mss and return.
3748 * In case we return early we need to initialize metrics
3749 * to a defined state as tcp_hc_get() would do for us
3750 * if there was no cache hit.
3752 if (metricptr != NULL)
3753 bzero(metricptr, sizeof(struct hc_metrics_lite));
3757 /* What have we got? */
3761 * Offer == 0 means that there was no MSS on the SYN
3762 * segment, in this case we use tcp_mssdflt as
3763 * already assigned to t_maxseg above.
3765 offer = tp->t_maxseg;
3770 * Offer == -1 means that we didn't receive SYN yet.
3776 * Prevent DoS attack with too small MSS. Round up
3777 * to at least minmss.
3779 offer = max(offer, V_tcp_minmss);
3783 * rmx information is now retrieved from tcp_hostcache.
3785 tcp_hc_get(&inp->inp_inc, &metrics);
3786 if (metricptr != NULL)
3787 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite));
3790 * If there's a discovered mtu in tcp hostcache, use it.
3791 * Else, use the link mtu.
3793 if (metrics.rmx_mtu)
3794 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
3798 mss = maxmtu - min_protoh;
3799 if (!V_path_mtu_discovery &&
3800 !in6_localaddr(&inp->in6p_faddr))
3801 mss = min(mss, V_tcp_v6mssdflt);
3804 #if defined(INET) && defined(INET6)
3809 mss = maxmtu - min_protoh;
3810 if (!V_path_mtu_discovery &&
3811 !in_localaddr(inp->inp_faddr))
3812 mss = min(mss, V_tcp_mssdflt);
3816 * XXX - The above conditional (mss = maxmtu - min_protoh)
3817 * probably violates the TCP spec.
3818 * The problem is that, since we don't know the
3819 * other end's MSS, we are supposed to use a conservative
3820 * default. But, if we do that, then MTU discovery will
3821 * never actually take place, because the conservative
3822 * default is much less than the MTUs typically seen
3823 * on the Internet today. For the moment, we'll sweep
3824 * this under the carpet.
3826 * The conservative default might not actually be a problem
3827 * if the only case this occurs is when sending an initial
3828 * SYN with options and data to a host we've never talked
3829 * to before. Then, they will reply with an MSS value which
3830 * will get recorded and the new parameters should get
3831 * recomputed. For Further Study.
3834 mss = min(mss, offer);
3837 * Sanity check: make sure that maxseg will be large
3838 * enough to allow some data on segments even if
3839 * all the option space is used (40 bytes). Otherwise
3840 * funny things may happen in tcp_output.
3842 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
3850 tcp_mss(struct tcpcb *tp, int offer)
3856 struct hc_metrics_lite metrics;
3857 struct tcp_ifcap cap;
3859 KASSERT(tp != NULL, ("%s: tp == NULL", __func__));
3861 bzero(&cap, sizeof(cap));
3862 tcp_mss_update(tp, offer, -1, &metrics, &cap);
3868 * If there's a pipesize, change the socket buffer to that size,
3869 * don't change if sb_hiwat is different than default (then it
3870 * has been changed on purpose with setsockopt).
3871 * Make the socket buffers an integral number of mss units;
3872 * if the mss is larger than the socket buffer, decrease the mss.
3874 so = inp->inp_socket;
3875 SOCKBUF_LOCK(&so->so_snd);
3876 if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
3877 bufsize = metrics.rmx_sendpipe;
3879 bufsize = so->so_snd.sb_hiwat;
3883 bufsize = roundup(bufsize, mss);
3884 if (bufsize > sb_max)
3886 if (bufsize > so->so_snd.sb_hiwat)
3887 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
3889 SOCKBUF_UNLOCK(&so->so_snd);
3891 * Sanity check: make sure that maxseg will be large
3892 * enough to allow some data on segments even if
3893 * all the option space is used (40 bytes). Otherwise
3894 * funny things may happen in tcp_output.
3896 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
3898 tp->t_maxseg = max(mss, 64);
3900 SOCKBUF_LOCK(&so->so_rcv);
3901 if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
3902 bufsize = metrics.rmx_recvpipe;
3904 bufsize = so->so_rcv.sb_hiwat;
3905 if (bufsize > mss) {
3906 bufsize = roundup(bufsize, mss);
3907 if (bufsize > sb_max)
3909 if (bufsize > so->so_rcv.sb_hiwat)
3910 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
3912 SOCKBUF_UNLOCK(&so->so_rcv);
3914 /* Check the interface for TSO capabilities. */
3915 if (cap.ifcap & CSUM_TSO) {
3916 tp->t_flags |= TF_TSO;
3917 tp->t_tsomax = cap.tsomax;
3918 tp->t_tsomaxsegcount = cap.tsomaxsegcount;
3919 tp->t_tsomaxsegsize = cap.tsomaxsegsize;
3924 * Determine the MSS option to send on an outgoing SYN.
3927 tcp_mssopt(struct in_conninfo *inc)
3930 uint32_t thcmtu = 0;
3931 uint32_t maxmtu = 0;
3934 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
3937 if (inc->inc_flags & INC_ISIPV6) {
3938 mss = V_tcp_v6mssdflt;
3939 maxmtu = tcp_maxmtu6(inc, NULL);
3940 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3943 #if defined(INET) && defined(INET6)
3948 mss = V_tcp_mssdflt;
3949 maxmtu = tcp_maxmtu(inc, NULL);
3950 min_protoh = sizeof(struct tcpiphdr);
3953 #if defined(INET6) || defined(INET)
3954 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3957 if (maxmtu && thcmtu)
3958 mss = min(maxmtu, thcmtu) - min_protoh;
3959 else if (maxmtu || thcmtu)
3960 mss = max(maxmtu, thcmtu) - min_protoh;
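/*
 * Numeric example: for an IPv4 peer on a standard 1500-byte
 * Ethernet MTU with no host cache entry, min_protoh is 40 (20-byte
 * IP + 20-byte TCP headers), so the advertised MSS is
 * 1500 - 40 = 1460; an IPv6 peer on the same link gets
 * 1500 - 60 = 1440.
 */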
3966 tcp_do_prr_ack(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
3968 int snd_cnt = 0, limit = 0, del_data = 0, pipe = 0;
3969 int maxseg = tcp_maxseg(tp);
3971 INP_WLOCK_ASSERT(tp->t_inpcb);
3974 * Compute the amount of data that this ACK is indicating
3975 * (del_data) and an estimate of how many bytes are in the
3976 * network (pipe).
3978 if (((tp->t_flags & TF_SACK_PERMIT) &&
3979 (to->to_flags & TOF_SACK)) ||
3980 (IN_CONGRECOVERY(tp->t_flags) &&
3981 !IN_FASTRECOVERY(tp->t_flags))) {
3982 del_data = tp->sackhint.delivered_data;
3983 if (V_tcp_do_newsack)
3984 pipe = tcp_compute_pipe(tp);
3986 pipe = (tp->snd_nxt - tp->snd_fack) +
3987 tp->sackhint.sack_bytes_rexmit;
3989 if (tp->sackhint.prr_delivered < (tcprexmtthresh * maxseg +
3990 tp->snd_recover - tp->snd_una))
3992 pipe = imax(0, tp->snd_max - tp->snd_una -
3993 imin(INT_MAX / 65536, tp->t_dupacks) * maxseg);
3995 tp->sackhint.prr_delivered += del_data;
3997 * Proportional Rate Reduction
3999 if (pipe >= tp->snd_ssthresh) {
4000 if (tp->sackhint.recover_fs == 0)
4001 tp->sackhint.recover_fs =
4002 imax(1, tp->snd_nxt - tp->snd_una);
4003 snd_cnt = howmany((long)tp->sackhint.prr_delivered *
4004 tp->snd_ssthresh, tp->sackhint.recover_fs) -
4005 tp->sackhint.prr_out;
4007 if (V_tcp_do_prr_conservative || (del_data == 0))
4008 limit = tp->sackhint.prr_delivered -
4009 tp->sackhint.prr_out;
4011 limit = imax(tp->sackhint.prr_delivered -
4012 tp->sackhint.prr_out, del_data) +
4014 snd_cnt = imin((tp->snd_ssthresh - pipe), limit);
4016 snd_cnt = imax(snd_cnt, 0) / maxseg;
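/*
 * Worked example: with maxseg 1460, snd_ssthresh == 10 * maxseg,
 * recover_fs == 20 * maxseg and prr_out == 0, every 2 * maxseg of
 * newly delivered data (prr_delivered) makes snd_cnt come out to
 * one segment while pipe >= ssthresh, i.e. roughly one new segment
 * sent per two delivered -- the rate-halving behaviour PRR aims for.
 */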
4018 * Send snd_cnt new data into the network in response to this ack.
4019 * If there is going to be a SACK retransmission, adjust snd_cwnd
4020 * accordingly.
4022 if (IN_FASTRECOVERY(tp->t_flags)) {
4023 if ((tp->t_flags & TF_SACK_PERMIT) &&
4024 (to->to_flags & TOF_SACK)) {
4025 tp->snd_cwnd = tp->snd_nxt - tp->snd_recover +
4026 tp->sackhint.sack_bytes_rexmit +
4029 tp->snd_cwnd = (tp->snd_max - tp->snd_una) +
4032 } else if (IN_CONGRECOVERY(tp->t_flags))
4033 tp->snd_cwnd = pipe - del_data + (snd_cnt * maxseg);
4034 tp->snd_cwnd = imax(maxseg, tp->snd_cwnd);
4038 * When a partial ack arrives, force the retransmission of the
4039 * next unacknowledged segment. Do not clear tp->t_dupacks.
4040 * By setting snd_nxt to th_ack, this forces the retransmission timer
4041 * to be started again.
4044 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
4046 tcp_seq onxt = tp->snd_nxt;
4047 uint32_t ocwnd = tp->snd_cwnd;
4048 u_int maxseg = tcp_maxseg(tp);
4050 INP_WLOCK_ASSERT(tp->t_inpcb);
4052 tcp_timer_activate(tp, TT_REXMT, 0);
4054 tp->snd_nxt = th->th_ack;
4056 * Set snd_cwnd to one segment beyond acknowledged offset.
4057 * (tp->snd_una has not yet been updated when this function is called.)
4059 tp->snd_cwnd = maxseg + BYTES_THIS_ACK(tp, th);
4060 tp->t_flags |= TF_ACKNOW;
4061 (void) tp->t_fb->tfb_tcp_output(tp);
4062 tp->snd_cwnd = ocwnd;
4063 if (SEQ_GT(onxt, tp->snd_nxt))
4066 * Partial window deflation. Relies on fact that tp->snd_una
4067 * not updated yet.
4069 if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
4070 tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
4073 tp->snd_cwnd += maxseg;
4077 tcp_compute_pipe(struct tcpcb *tp)
4079 return (tp->snd_max - tp->snd_una +
4080 tp->sackhint.sack_bytes_rexmit -
4081 tp->sackhint.sacked_bytes);
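/*
 * Numeric example: with 20 segments outstanding (snd_max - snd_una),
 * 3 segments' worth already retransmitted during SACK recovery and
 * 5 segments' worth reported SACKed by the receiver, the estimate
 * is (20 + 3 - 5) segments, i.e. 18 segments presumed still in
 * flight.
 */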
4085 tcp_compute_initwnd(uint32_t maxseg)
4088 * Calculate the Initial Window, also used as Restart Window
4090 * RFC5681 Section 3.1 specifies the default conservative values.
4091 * RFC3390 specifies slightly more aggressive values.
4092 * RFC6928 increases it to ten segments.
4093 * Support for user specified value for initial flight size.
4095 if (V_tcp_initcwnd_segments)
4096 return min(V_tcp_initcwnd_segments * maxseg,
4097 max(2 * maxseg, V_tcp_initcwnd_segments * 1460));
4098 else if (V_tcp_do_rfc3390)
4099 return min(4 * maxseg, max(2 * maxseg, 4380));
4101 /* Per RFC5681 Section 3.1 */
4102 if (maxseg > 2190)
4103 return (2 * maxseg);
4104 else if (maxseg > 1095)
4105 return (3 * maxseg);
4106 else
4107 return (4 * maxseg);
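/*
 * Numeric examples for maxseg == 1460: with V_tcp_initcwnd_segments
 * set (10 by default), the window is min(14600, max(2920, 14600))
 * = 14600 bytes; under RFC3390 it is min(5840, max(2920, 4380))
 * = 4380 bytes; and the RFC5681 fallback gives 3 * 1460 = 4380
 * bytes, since 1095 < 1460 <= 2190.
 */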