/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007-2008,2010
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 *
 * Portions of this software were developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
 * James Healy and David Hayes, made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
53 #include "opt_ipfw.h" /* for ipfw_fwd */
55 #include "opt_inet6.h"
56 #include "opt_ipsec.h"
57 #include "opt_tcpdebug.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#define	TCPSTATES		/* for logging */
#include <netinet/cc.h>
#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_syncache.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif /* IPSEC */

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>
const int tcprexmtthresh = 3;

int tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0,
    "Log all incoming TCP segments to closed ports");
VNET_DEFINE(int, blackhole) = 0;
#define	V_blackhole		VNET(blackhole)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(blackhole), 0,
    "Do not send RST on segments to closed ports");

VNET_DEFINE(int, tcp_delack_enabled) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_delack_enabled), 0,
    "Delay ACK to try and piggyback it onto a data packet");

VNET_DEFINE(int, drop_synfin) = 0;
#define	V_drop_synfin		VNET(drop_synfin)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(drop_synfin), 0,
    "Drop TCP packets with SYN+FIN set");

VNET_DEFINE(int, tcp_do_rfc3042) = 1;
#define	V_tcp_do_rfc3042	VNET(tcp_do_rfc3042)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3042), 0,
    "Enable RFC 3042 (Limited Transmit)");

VNET_DEFINE(int, tcp_do_rfc3390) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3390), 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, experimental, CTLFLAG_RW, 0,
    "Experimental TCP extensions");

VNET_DEFINE(int, tcp_do_initcwnd10) = 1;
SYSCTL_INT(_net_inet_tcp_experimental, OID_AUTO, initcwnd10,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_do_initcwnd10), 0,
    "Enable RFC 6928 (Increasing initial CWND to 10)");

VNET_DEFINE(int, tcp_do_rfc3465) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3465), 0,
    "Enable RFC 3465 (Appropriate Byte Counting)");

VNET_DEFINE(int, tcp_abc_l_var) = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_abc_l_var), 2,
    "Cap the max cwnd increment during slow-start to this number of segments");
static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");

VNET_DEFINE(int, tcp_do_ecn) = 0;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_ecn), 0,
    "TCP ECN support");

VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_ecn_maxretries), 0,
    "Max retries before giving up on ECN");
VNET_DEFINE(int, tcp_insecure_syn) = 0;
#define	V_tcp_insecure_syn	VNET(tcp_insecure_syn)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_syn, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_syn), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting SYN packets");

VNET_DEFINE(int, tcp_insecure_rst) = 0;
#define	V_tcp_insecure_rst	VNET(tcp_insecure_rst)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_rst), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting RST packets");

VNET_DEFINE(int, tcp_recvspace) = 1024*64;
#define	V_tcp_recvspace	VNET(tcp_recvspace)
SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");

VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
#define	V_tcp_do_autorcvbuf	VNET(tcp_do_autorcvbuf)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_autorcvbuf), 0,
    "Enable automatic receive buffer sizing");

VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024;
#define	V_tcp_autorcvbuf_inc	VNET(tcp_autorcvbuf_inc)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_inc), 0,
    "Incrementor step size of automatic receive buffer");

VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
#define	V_tcp_autorcvbuf_max	VNET(tcp_autorcvbuf_max)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_max), 0,
    "Max size of automatic receive buffer");
VNET_DEFINE(struct inpcbhead, tcb);
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
VNET_DEFINE(struct inpcbinfo, tcbinfo);
static void	 tcp_dooptions(struct tcpopt *, u_char *, int, int);
static void	 tcp_do_segment(struct mbuf *, struct tcphdr *,
		     struct socket *, struct tcpcb *, int, int, uint8_t,
		     int);
static void	 tcp_dropwithreset(struct mbuf *, struct tcphdr *,
		     struct tcpcb *, int, int);
static void	 tcp_pulloutofband(struct socket *,
		     struct tcphdr *, struct mbuf *, int);
static void	 tcp_xmit_timer(struct tcpcb *, int);
static void	 tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
static void inline	cc_ack_received(struct tcpcb *tp, struct tcphdr *th,
			    uint16_t type);
static void inline	cc_conn_init(struct tcpcb *tp);
static void inline	cc_post_recovery(struct tcpcb *tp, struct tcphdr *th);
static void inline	hhook_run_tcp_est_in(struct tcpcb *tp,
			    struct tcphdr *th, struct tcpopt *to);
/*
 * TCP statistics are stored in an "array" of counter(9)s.
 */
VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
VNET_PCPUSTAT_SYSINIT(tcpstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
    tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");

#ifdef VIMAGE
VNET_PCPUSTAT_SYSUNINIT(tcpstat);
#endif /* VIMAGE */

/*
 * Kernel module interface for updating tcpstat.  The argument is an index
 * into tcpstat treated as an array.
 */
void
kmod_tcpstat_inc(int statnum)
{

	counter_u64_add(VNET(tcpstat)[statnum], 1);
}
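
/*
 * Usage sketch: kernel modules normally do not compute indices by hand
 * but go through the KMOD_TCPSTAT_INC() macro from netinet/tcp_var.h,
 * which converts a field name into an index; roughly:
 *
 *	KMOD_TCPSTAT_INC(tcps_rcvtotal);
 *	=> kmod_tcpstat_inc(offsetof(struct tcpstat, tcps_rcvtotal) /
 *	       sizeof(uint64_t));
 */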
/*
 * Wrapper for the TCP established input helper hook.
 */
static void inline
hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
		    tp->osd);
	}
}
/*
 * CC wrapper hook functions
 */
static void inline
cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
	if (tp->snd_cwnd <= tp->snd_wnd)
		tp->ccv->flags |= CCF_CWND_LIMITED;
	else
		tp->ccv->flags &= ~CCF_CWND_LIMITED;

	if (type == CC_ACK) {
		if (tp->snd_cwnd > tp->snd_ssthresh) {
			tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
			    V_tcp_abc_l_var * tp->t_maxseg);
			if (tp->t_bytes_acked >= tp->snd_cwnd) {
				tp->t_bytes_acked -= tp->snd_cwnd;
				tp->ccv->flags |= CCF_ABC_SENTAWND;
			}
		} else {
			tp->ccv->flags &= ~CCF_ABC_SENTAWND;
			tp->t_bytes_acked = 0;
		}
	}

	if (CC_ALGO(tp)->ack_received != NULL) {
		/* XXXLAS: Find a way to live without this */
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->ack_received(tp->ccv, type);
	}
}
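
/*
 * Worked example of the ABC accounting above (illustrative numbers):
 * with t_maxseg = 1460 and the default abc_l_var of 2, a stretch ACK
 * covering 10 segments credits only min(14600, 2 * 1460) = 2920 bytes
 * to t_bytes_acked, and CCF_ABC_SENTAWND is raised once a full cwnd's
 * worth of bytes has been acked, telling the CC algorithm that one
 * whole window has been sent and acknowledged.
 */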
static void inline
cc_conn_init(struct tcpcb *tp)
{
	struct hc_metrics_lite metrics;
	struct inpcb *inp = tp->t_inpcb;
	int rtt;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tcp_hc_get(&inp->inp_inc, &metrics);

	if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
		tp->t_srtt = rtt;
		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
		TCPSTAT_INC(tcps_usedrtt);
		if (metrics.rmx_rttvar) {
			tp->t_rttvar = metrics.rmx_rttvar;
			TCPSTAT_INC(tcps_usedrttvar);
		} else {
			/* default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
	if (metrics.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * tp->t_maxseg, metrics.rmx_ssthresh);
		TCPSTAT_INC(tcps_usedssthresh);
	}

	/*
	 * Set the initial slow-start flight size.
	 *
	 * RFC5681 Section 3.1 specifies the default conservative values.
	 * RFC3390 specifies slightly more aggressive values.
	 * RFC6928 increases it to ten segments.
	 *
	 * If a SYN or SYN/ACK was lost and retransmitted, we have to
	 * reduce the initial CWND to one segment as congestion is likely
	 * requiring us to be cautious.
	 */
	if (tp->snd_cwnd == 1)
		tp->snd_cwnd = tp->t_maxseg;		/* SYN(-ACK) lost */
	else if (V_tcp_do_initcwnd10)
		tp->snd_cwnd = min(10 * tp->t_maxseg,
		    max(2 * tp->t_maxseg, 14600));
	else if (V_tcp_do_rfc3390)
		tp->snd_cwnd = min(4 * tp->t_maxseg,
		    max(2 * tp->t_maxseg, 4380));
	else {
		/* Per RFC5681 Section 3.1 */
		if (tp->t_maxseg > 2190)
			tp->snd_cwnd = 2 * tp->t_maxseg;
		else if (tp->t_maxseg > 1095)
			tp->snd_cwnd = 3 * tp->t_maxseg;
		else
			tp->snd_cwnd = 4 * tp->t_maxseg;
	}

	if (CC_ALGO(tp)->conn_init != NULL)
		CC_ALGO(tp)->conn_init(tp->ccv);
}
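
/*
 * Worked example for the initial window selection above (illustrative,
 * assuming t_maxseg = 1460): the RFC6928 branch yields min(14600,
 * max(2920, 14600)) = 14600 bytes (10 segments); the RFC3390 branch
 * yields min(5840, max(2920, 4380)) = 4380 bytes (3 segments); and the
 * RFC5681 fallback yields 3 * 1460 = 4380 bytes, since
 * 1095 < 1460 <= 2190.
 */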
void inline
cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	switch(type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(tp->t_flags)) {
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_ECN:
		if (!IN_CONGRECOVERY(tp->t_flags)) {
			TCPSTAT_INC(tcps_ecn_rcwnd);
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_RTO:
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		EXIT_RECOVERY(tp->t_flags);
		tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
		    tp->t_maxseg) * tp->t_maxseg;
		tp->snd_cwnd = tp->t_maxseg;
		break;
	case CC_RTO_ERR:
		TCPSTAT_INC(tcps_sndrexmitbad);
		/* RTO was unnecessary, so reset everything. */
		tp->snd_cwnd = tp->snd_cwnd_prev;
		tp->snd_ssthresh = tp->snd_ssthresh_prev;
		tp->snd_recover = tp->snd_recover_prev;
		if (tp->t_flags & TF_WASFRECOVERY)
			ENTER_FASTRECOVERY(tp->t_flags);
		if (tp->t_flags & TF_WASCRECOVERY)
			ENTER_CONGRECOVERY(tp->t_flags);
		tp->snd_nxt = tp->snd_max;
		tp->t_flags &= ~TF_PREVVALID;
		tp->t_badrxtwin = 0;
		break;
	}

	if (CC_ALGO(tp)->cong_signal != NULL) {
		if (th != NULL)
			tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->cong_signal(tp->ccv, type);
	}
}
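
/*
 * Worked example for the CC_RTO ssthresh computation above
 * (illustrative numbers): with snd_wnd = 65535, snd_cwnd = 20440 and
 * t_maxseg = 1460, ssthresh becomes max(2, 20440 / 2 / 1460) * 1460 =
 * 10220 bytes, i.e. half the current flight rounded down to a whole
 * number of segments, but never less than two segments.
 */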
static void inline
cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	/* XXXLAS: KASSERT that we're in recovery? */

	if (CC_ALGO(tp)->post_recovery != NULL) {
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->post_recovery(tp->ccv);
	}
	/* XXXLAS: EXIT_RECOVERY ? */
	tp->t_bytes_acked = 0;
}
#ifdef TCP_SIGNATURE
static inline int
tcp_signature_verify_input(struct mbuf *m, int off0, int tlen, int optlen,
    struct tcpopt *to, struct tcphdr *th, u_int tcpbflag)
{
	int ret;

	tcp_fields_to_net(th);
	ret = tcp_signature_verify(m, off0, tlen, optlen, to, th, tcpbflag);
	tcp_fields_to_host(th);
	return (ret);
}
#endif
/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define ND6_HINT(tp) \
do { \
	if ((tp) && (tp)->t_inpcb && \
	    ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
		nd6_nud_hint(NULL, NULL, 0); \
} while (0)
#else
#define ND6_HINT(tp)
#endif
/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 * following conditions are met:
 *	- There is no delayed ack timer in progress.
 *	- Our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window.
 *	- LRO wasn't used for this segment.  We make sure by checking that the
 *	  segment size is not larger than the MSS.
 *	- Delayed acks are enabled or this is a half-synchronized T/TCP
 *	  connection.
 */
#define DELAY_ACK(tp, tlen)						\
	((!tcp_timer_active(tp, TT_DELACK) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (tlen <= tp->t_maxopd) &&					\
	    (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
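
/*
 * Usage sketch, mirroring the fast path further below:
 *
 *	if (DELAY_ACK(tp, tlen))
 *		tp->t_flags |= TF_DELACK;
 *	else {
 *		tp->t_flags |= TF_ACKNOW;
 *		tcp_output(tp);
 *	}
 *
 * so a pure receiver ACKs at most every other data segment, or when
 * the delayed-ACK timer fires.
 */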
static void inline
cc_ecnpkt_handler(struct tcpcb *tp, struct tcphdr *th, uint8_t iptos)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	if (CC_ALGO(tp)->ecnpkt_handler != NULL) {
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->ccv->flags |= CCF_IPHDR_CE;
			break;
		case IPTOS_ECN_ECT0:
			tp->ccv->flags &= ~CCF_IPHDR_CE;
			break;
		case IPTOS_ECN_ECT1:
			tp->ccv->flags &= ~CCF_IPHDR_CE;
			break;
		}

		if (th->th_flags & TH_CWR)
			tp->ccv->flags |= CCF_TCPHDR_CWR;
		else
			tp->ccv->flags &= ~CCF_TCPHDR_CWR;

		if (tp->t_flags & TF_DELACK)
			tp->ccv->flags |= CCF_DELACK;
		else
			tp->ccv->flags &= ~CCF_DELACK;

		CC_ALGO(tp)->ecnpkt_handler(tp->ccv);

		if (tp->ccv->flags & CCF_ACKNOW)
			tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
	}
}
/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;
	struct ip6_hdr *ip6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ip6 = mtod(m, struct ip6_hdr *);
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {

		ifa_free(&ia6->ia_ifa);
		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		return (IPPROTO_DONE);
	}
	if (ia6)
		ifa_free(&ia6->ia_ifa);

	return (tcp_input(mp, offp, proto));
}
#endif /* INET6 */
int
tcp_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct tcphdr *th = NULL;
	struct ip *ip = NULL;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int off0;
	int optlen = 0;
#ifdef INET
	int len;
#endif
	int tlen = 0, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
#ifdef TCP_SIGNATURE
	uint8_t sig_checked = 0;
#endif
	uint8_t iptos = 0;
	struct m_tag *fwd_tag = NULL;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
#endif /* INET6 */
	struct tcpopt to;		/* options in this segment */
	char *s = NULL;			/* address and port logging */
	int ti_locked;
#define	TI_UNLOCKED	1
#define	TI_RLOCKED	2

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	off0 = *offp;
	m = *mp;
	*mp = NULL;
	to.to_flags = 0;
	TCPSTAT_INC(tcps_rcvtotal);
#ifdef INET6
	if (isipv6) {
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */

		if (m->m_len < (sizeof(*ip6) + sizeof(*th))) {
			m = m_pullup(m, sizeof(*ip6) + sizeof(*th));
			if (m == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return (IPPROTO_DONE);
			}
		}

		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)((caddr_t)ip6 + off0);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in6_cksum_pseudo(ip6, tlen,
				    IPPROTO_TCP, m->m_pkthdr.csum_data);
			th->th_sum ^= 0xffff;
		} else
			th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}

		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbound/unconnected pcb,
		 * an unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with unspecified IPv6 destination are
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
	}
#endif
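	/*
	 * A note on the checksum folding above (and in the INET branch
	 * below): when the NIC has verified the checksum including the
	 * pseudo-header (CSUM_PSEUDO_HDR), csum_data already holds the
	 * final 16-bit sum, which is 0xffff for a good segment, so
	 * th_sum ^= 0xffff leaves zero exactly when the checksum is
	 * good; the single "if (th->th_sum)" test therefore covers both
	 * the hardware- and software-verified paths.
	 */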
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return (IPPROTO_DONE);
			}
		}
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ntohs(ip->ip_len) - off0;

		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data + tlen +
				    IPPROTO_TCP));
			th->th_sum ^= 0xffff;
		} else {
			struct ipovly *ipov = (struct ipovly *)ip;

			/*
			 * Checksum extended TCP header and data.
			 */
			len = off0 + tlen;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = htons(tlen);
			th->th_sum = in_cksum(m, len);
			/* Reset length for SDT probes. */
			ip->ip_len = htons(tlen + off0);
		}

		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}
		/* Re-initialization for later version check */
		ip->ip_v = IPVERSION;
	}
#endif /* INET */
#ifdef INET6
	if (isipv6)
		iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
		iptos = ip->ip_tos;
#endif
	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		TCPSTAT_INC(tcps_rcvbadoff);
		goto drop;
	}
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
#ifdef INET6
		if (isipv6) {
			IP6_EXTHDR_CHECK(m, off0, off, IPPROTO_DONE);
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					return (IPPROTO_DONE);
				}
				ip = mtod(m, struct ip *);
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
#endif
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;
	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	tcp_fields_to_host(th);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;
	/*
	 * Locate pcb for segment; if we're likely to add or remove a
	 * connection then first acquire pcbinfo lock.  There are three cases
	 * where we might discover later we need a write lock despite the
	 * flags: ACKs moving a connection out of the syncache, ACKs for a
	 * connection in TIMEWAIT and SYNs not targeting a listening socket.
	 */
	if ((thflags & (TH_FIN | TH_RST)) != 0) {
		INP_INFO_RLOCK(&V_tcbinfo);
		ti_locked = TI_RLOCKED;
	} else
		ti_locked = TI_UNLOCKED;
	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	if (
#ifdef INET6
	    (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
#ifdef INET
	    || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
#endif
#endif
#if defined(INET) && !defined(INET6)
	    (m->m_flags & M_IP_NEXTHOP)
#endif
	    )
		fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

findpcb:
#ifdef INVARIANTS
	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	} else {
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif
#ifdef INET6
	if (isipv6 && fwd_tag != NULL) {
		struct sockaddr_in6 *next_hop6;

		next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in6_pcblookup_mbuf(&V_tcbinfo,
		    &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
			    th->th_sport, &next_hop6->sin6_addr,
			    next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
	} else if (isipv6) {
		inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
		    th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
	}
#endif /* INET6 */
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	if (fwd_tag != NULL) {
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag+1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * already got one like this?
		 */
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
			    th->th_sport, next_hop->sin_addr,
			    next_hop->sin_port ? ntohs(next_hop->sin_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
	} else
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
		    th->th_sport, ip->ip_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
#endif /* INET */
	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 * XXX MRT Send RST using which routing table?
	 */
	if (inp == NULL) {
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    tcp_log_in_vain == 2) {
			if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
				log(LOG_INFO, "%s; %s: Connection attempt "
				    "to closed port\n", s, __func__);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
		    V_blackhole == 2)
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_WLOCK_ASSERT(inp);
	if ((inp->inp_flowtype == M_HASHTYPE_NONE) &&
	    (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) &&
	    ((inp->inp_socket == NULL) ||
	    (inp->inp_socket->so_options & SO_ACCEPTCONN) == 0)) {
		inp->inp_flowid = m->m_pkthdr.flowid;
		inp->inp_flowtype = M_HASHTYPE_GET(m);
	}
#ifdef IPSEC
#ifdef INET6
	if (isipv6 && ipsec6_in_reject(m, inp)) {
		goto dropunlock;
	} else
#endif /* INET6 */
	if (ipsec4_in_reject(m, inp) != 0) {
		goto dropunlock;
	}
#endif /* IPSEC */

	/*
	 * Check the minimum TTL for socket.
	 */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
			goto dropunlock;
		else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}
	/*
	 * A previous connection in TIMEWAIT state is supposed to catch stray
	 * or duplicate segments arriving late.  If this segment was a
	 * legitimate new connection attempt, the old INPCB gets removed and
	 * we can try again to find a listening socket.
	 *
	 * At this point, due to earlier optimism, we may hold only an inpcb
	 * lock, and not the inpcbinfo write lock.  If so, we need to try to
	 * acquire it, or if that fails, acquire a reference on the inpcb,
	 * drop all locks, acquire a global write lock, and then re-acquire
	 * the inpcb lock.  We may at that point discover that another thread
	 * has tried to free the inpcb, in which case we need to loop back
	 * and try to find a new inpcb to deliver to.
	 *
	 * XXXRW: It may be time to rethink timewait locking.
	 */
relocked:
	if (inp->inp_flags & INP_TIMEWAIT) {
		if (ti_locked == TI_UNLOCKED) {
			if (INP_INFO_TRY_RLOCK(&V_tcbinfo) == 0) {
				in_pcbref(inp);
				INP_WUNLOCK(inp);
				INP_INFO_RLOCK(&V_tcbinfo);
				ti_locked = TI_RLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele_wlocked(inp)) {
					inp = NULL;
					goto findpcb;
				}
			} else
				ti_locked = TI_RLOCKED;
		}
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);

		if (thflags & TH_SYN)
			tcp_dooptions(&to, optp, optlen, TO_SYN);
		/*
		 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
		 */
		if (tcp_twcheck(inp, &to, th, m, tlen))
			goto findpcb;
		INP_INFO_RUNLOCK(&V_tcbinfo);
		return (IPPROTO_DONE);
	}
	/*
	 * The TCPCB may no longer exist if the connection is winding
	 * down or it is in the CLOSED state.  Either way we drop the
	 * segment and send an appropriate response.
	 */
	tp = intotcpcb(inp);
	if (tp == NULL || tp->t_state == TCPS_CLOSED) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		tcp_offload_input(tp, m);
		m = NULL;	/* consumed by the TOE driver */
		goto dropunlock;
	}
#endif
	/*
	 * We've identified a valid inpcb, but it could be that we need an
	 * inpcbinfo write lock but don't hold it.  In this case, attempt to
	 * acquire using the same strategy as the TIMEWAIT case above.  If we
	 * relock, we have to jump back to 'relocked' as the connection might
	 * now be in TIMEWAIT.
	 */
#ifdef INVARIANTS
	if ((thflags & (TH_FIN | TH_RST)) != 0)
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
#endif
	if (!((tp->t_state == TCPS_ESTABLISHED && (thflags & TH_SYN) == 0) ||
	    (tp->t_state == TCPS_LISTEN && (thflags & TH_SYN)))) {
		if (ti_locked == TI_UNLOCKED) {
			if (INP_INFO_TRY_RLOCK(&V_tcbinfo) == 0) {
				in_pcbref(inp);
				INP_WUNLOCK(inp);
				INP_INFO_RLOCK(&V_tcbinfo);
				ti_locked = TI_RLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele_wlocked(inp)) {
					inp = NULL;
					goto findpcb;
				}
				goto relocked;
			} else
				ti_locked = TI_RLOCKED;
		}
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	}
	INP_WLOCK_ASSERT(inp);
#ifdef MAC
	if (mac_inpcb_check_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
#ifdef INET6
		if (isipv6) {
			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
		} else
#endif
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif /* TCPDEBUG */
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.
	 */
	if (so->so_options & SO_ACCEPTCONN) {
		struct in_conninfo inc;

		KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
		    "tp not listening", __func__));
		bzero(&inc, sizeof(inc));
#ifdef INET6
		if (isipv6) {
			inc.inc_flags |= INC_ISIPV6;
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;
		inc.inc_fibnum = so->so_fibnum;
		/*
		 * Check for an existing connection attempt in syncache if
		 * the flag is only ACK.  A successful lookup creates a new
		 * socket appended to the listen queue in SYN_RECEIVED state.
		 */
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {

			INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
			/*
			 * Parse the TCP options here because
			 * syncookies need access to the reflected
			 * timestamp.
			 */
			tcp_dooptions(&to, optp, optlen, 0);
			/*
			 * NB: syncache_expand() doesn't unlock
			 * inp and tcpinfo locks.
			 */
			if (!syncache_expand(&inc, &to, th, &so, m)) {
				/*
				 * No syncache entry or ACK was not
				 * for our SYN/ACK.  Send a RST.
				 * NB: syncache did its own logging
				 * of the failure cause.
				 */
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (so == NULL) {
				/*
				 * We completed the 3-way handshake
				 * but could not allocate a socket
				 * either due to memory shortage,
				 * listen queue length limits or
				 * global socket limits.  Send RST
				 * or wait and have the remote end
				 * retransmit the ACK for another
				 * time.
				 */
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Socket allocation failed due to "
					    "limits or memory shortage, %s\n",
					    s, __func__,
					    V_tcp_sc_rst_sock_fail ?
					    "sending RST" : "try again");
				if (V_tcp_sc_rst_sock_fail) {
					rstreason = BANDLIM_UNLIMITED;
					goto dropwithreset;
				} else
					goto dropunlock;
			}
			/*
			 * Socket is created in state SYN_RECEIVED.
			 * Unlock the listen socket, lock the newly
			 * created socket and update the tp variable.
			 */
			INP_WUNLOCK(inp);	/* listen socket */
			inp = sotoinpcb(so);
			/*
			 * New connection inpcb is already locked by
			 * syncache_expand().
			 */
			INP_WLOCK_ASSERT(inp);
			tp = intotcpcb(inp);
			KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
			    ("%s: ", __func__));
#ifdef TCP_SIGNATURE
			if (sig_checked == 0) {
				tcp_dooptions(&to, optp, optlen,
				    (thflags & TH_SYN) ? TO_SYN : 0);
				if (!tcp_signature_verify_input(m, off0, tlen,
				    optlen, &to, th, tp->t_flags)) {

					/*
					 * In SYN_SENT state if it receives an
					 * RST, it is allowed for further
					 * processing.
					 */
					if ((thflags & TH_RST) == 0 ||
					    (tp->t_state == TCPS_SYN_SENT) == 0)
						goto dropunlock;
				}
				sig_checked = 1;
			}
#endif

			/*
			 * Process the segment and the data it
			 * contains.  tcp_do_segment() consumes
			 * the mbuf chain and unlocks the inpcb.
			 */
			tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
			    iptos, ti_locked);
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
			return (IPPROTO_DONE);
		}
		/*
		 * Segment flag validation for new connection attempts:
		 *
		 * Our (SYN|ACK) response was rejected.
		 * Check with syncache and remove entry to prevent
		 * retransmits.
		 *
		 * NB: syncache_chkrst does its own logging of failure
		 * causes.
		 */
		if (thflags & TH_RST) {
			syncache_chkrst(&inc, th);
			goto dropunlock;
		}
		/*
		 * We can't do anything without SYN.
		 */
		if ((thflags & TH_SYN) == 0) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN is missing, segment ignored\n",
				    s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * (SYN|ACK) is bogus on a listen socket.
		 */
		if (thflags & TH_ACK) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|ACK invalid, segment rejected\n",
				    s, __func__);
			syncache_badack(&inc);	/* XXX: Not needed! */
			TCPSTAT_INC(tcps_badsyn);
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		/*
		 * If the drop_synfin option is enabled, drop all
		 * segments with both the SYN and FIN bits set.
		 * This prevents e.g. nmap from identifying the
		 * TCP/IP stack.
		 *
		 * XXX: Poor reasoning.  nmap has other methods
		 * and is constantly refining its stack detection
		 * strategies.
		 * XXX: This is a violation of the TCP specification
		 * and was used by RFC1644.
		 */
		if ((thflags & TH_FIN) && V_drop_synfin) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|FIN segment ignored (based on "
				    "sysctl setting)\n", s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 *
		 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
		 * as they do not affect the state of the TCP FSM.
		 * The data pointed to by TH_URG and th_urp is ignored.
		 */
		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
		KASSERT(thflags & (TH_SYN),
		    ("%s: Listen socket: TH_SYN not set", __func__));
#ifdef INET6
		/*
		 * If deprecated address is forbidden,
		 * we do not accept SYN to deprecated interface
		 * address to prevent any new inbound connection from
		 * getting established.
		 * When we do not accept SYN, we send a TCP RST,
		 * with deprecated source address (instead of dropping
		 * it).  We compromise it as it is much better for peer
		 * to send a RST, and RST will be the final packet
		 * for the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it says like this:
		 * 1. use of deprecated addr with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple description text for deprecated address
		 * handling - worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !V_ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
			if ((ia6 != NULL) &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				ifa_free(&ia6->ia_ifa);
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to deprecated "
					"IPv6 address rejected\n",
					s, __func__);
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (ia6)
				ifa_free(&ia6->ia_ifa);
		}
#endif /* INET6 */
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *   Don't respond if the destination is a link layer
		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
		 *   If it is from this socket it must be forged.
		 *   Don't respond if the source or destination is a
		 *	global or subnet broad- or multicast address.
		 *   Note that it is quite possible to receive unicast
		 *	link-layer packets with a broadcast IP address.  Use
		 *	in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST)) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
			    log(LOG_DEBUG, "%s; %s: Listen socket: "
				"Connection attempt from broad- or multicast "
				"link layer address ignored\n", s, __func__);
			goto dropunlock;
		}
#ifdef INET6
		if (isipv6) {
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to/from self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to multicast "
					"address ignored\n", s, __func__);
				goto dropunlock;
			}
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to broad- "
					"or multicast address ignored\n",
					s, __func__);
				goto dropunlock;
			}
		}
#endif
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
#ifdef TCPDEBUG
		if (so->so_options & SO_DEBUG)
			tcp_trace(TA_INPUT, ostate, tp,
			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		tcp_dooptions(&to, optp, optlen, TO_SYN);
		syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL);
		/*
		 * Entry added to syncache and mbuf consumed.
		 * Only the listen socket is unlocked by syncache_add().
		 */
		if (ti_locked == TI_RLOCKED) {
			INP_INFO_RUNLOCK(&V_tcbinfo);
			ti_locked = TI_UNLOCKED;
		}
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		return (IPPROTO_DONE);
	} else if (tp->t_state == TCPS_LISTEN) {
		/*
		 * When a listen socket is torn down the SO_ACCEPTCONN
		 * flag is removed first while connections are drained
		 * from the accept queue in an unlock/lock cycle of the
		 * ACCEPT_LOCK, opening a race condition allowing a SYN
		 * attempt to go through unhandled.
		 */
		goto dropunlock;
	}
#ifdef TCP_SIGNATURE
	if (sig_checked == 0) {
		tcp_dooptions(&to, optp, optlen,
		    (thflags & TH_SYN) ? TO_SYN : 0);
		if (!tcp_signature_verify_input(m, off0, tlen, optlen, &to,
		    th, tp->t_flags)) {

			/*
			 * In SYN_SENT state if it receives an RST, it is
			 * allowed for further processing.
			 */
			if ((thflags & TH_RST) == 0 ||
			    (tp->t_state == TCPS_SYN_SENT) == 0)
				goto dropunlock;
		}
		sig_checked = 1;
	}
#endif

	TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);
	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
	 * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
	 * the inpcb, and unlocks pcbinfo.
	 */
	tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked);
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	return (IPPROTO_DONE);
dropwithreset:
	TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);

	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RUNLOCK(&V_tcbinfo);
		ti_locked = TI_UNLOCKED;
	}
#ifdef INVARIANTS
	else {
		KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropwithreset "
		    "ti_locked: %d", __func__, ti_locked));
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	if (inp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(inp);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
	goto drop;

dropunlock:
	if (m != NULL)
		TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);

	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RUNLOCK(&V_tcbinfo);
		ti_locked = TI_UNLOCKED;
	}
#ifdef INVARIANTS
	else {
		KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropunlock "
		    "ti_locked: %d", __func__, ti_locked));
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	if (inp != NULL)
		INP_WUNLOCK(inp);

drop:
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
	return (IPPROTO_DONE);
}
static void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos,
    int ti_locked)
{
	int thflags, acked, ourfinisacked, needoutput = 0;
	int rstreason, todrop, win;
	u_long tiwin;
	char *s;
	struct in_conninfo *inc;
	struct tcpopt to;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif

	thflags = th->th_flags;
	inc = &tp->t_inpcb->inp_inc;
	tp->sackhint.last_sack_ack = 0;
	/*
	 * If this is either a state-changing packet or current state isn't
	 * established, we require a write lock on tcbinfo.  Otherwise, we
	 * allow the tcbinfo to be in either locked or unlocked, as the
	 * caller may have unnecessarily acquired a write lock due to a race.
	 */
	if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
	    tp->t_state != TCPS_ESTABLISHED) {
		KASSERT(ti_locked == TI_RLOCKED, ("%s ti_locked %d for "
		    "SYN/FIN/RST/!EST", __func__, ti_locked));
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	} else {
#ifdef INVARIANTS
		if (ti_locked == TI_RLOCKED)
			INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
		else {
			KASSERT(ti_locked == TI_UNLOCKED, ("%s: EST "
			    "ti_locked: %d", __func__, ti_locked));
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		}
#endif
	}
	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	tp->t_rcvtime = ticks;
	if (TCPS_HAVEESTABLISHED(tp->t_state))
		tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
	/*
	 * Scale up the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;
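	/*
	 * Worked example (illustrative): with a raw th_win of 8192 and a
	 * negotiated snd_scale of 7, the effective send window is
	 * 8192 << 7 = 1048576 bytes, which is why tiwin is a u_long
	 * rather than the 16-bit on-the-wire field.
	 */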
	/*
	 * TCP ECN processing.
	 */
	if (tp->t_flags & TF_ECN_PERMIT) {
		if (thflags & TH_CWR)
			tp->t_flags &= ~TF_ECN_SND_ECE;
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->t_flags |= TF_ECN_SND_ECE;
			TCPSTAT_INC(tcps_ecn_ce);
			break;
		case IPTOS_ECN_ECT0:
			TCPSTAT_INC(tcps_ecn_ect0);
			break;
		case IPTOS_ECN_ECT1:
			TCPSTAT_INC(tcps_ecn_ect1);
			break;
		}

		/* Process a packet differently from RFC3168. */
		cc_ecnpkt_handler(tp, th, iptos);

		/* Congestion experienced. */
		if (thflags & TH_ECE) {
			cc_cong_signal(tp, th, CC_ECN);
		}
	}
	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);
	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
			to.to_tsecr = 0;
	}
	/*
	 * If timestamps were negotiated during SYN/ACK they should
	 * appear on every segment during this session and vice versa.
	 */
	if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp missing, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
		}
	}
	if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
		}
	}
	/*
	 * Process options only when we get SYN/ACK back.  The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
		}
		/*
		 * Initial send window.  It will be updated with
		 * the next incoming segment to the scaled value.
		 */
		tp->snd_wnd = th->th_win;
		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = tcp_ts_getticks();
		}
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    (to.to_flags & TOF_SACKPERM) == 0)
			tp->t_flags &= ~TF_SACK_PERMIT;
	}
	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED first, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    th->th_seq == tp->rcv_nxt &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    tp->snd_nxt == tp->snd_max &&
	    tiwin && tiwin == tp->snd_wnd &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    LIST_EMPTY(&tp->t_segq) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {
		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = tcp_ts_getticks();
			tp->ts_recent = to.to_tsval;
		}
		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    !IN_RECOVERY(tp->t_flags) &&
			    (to.to_flags & TOF_SACK) == 0 &&
			    TAILQ_EMPTY(&tp->snd_holes)) {
				/*
				 * This is a pure ack for outstanding data.
				 */
				if (ti_locked == TI_RLOCKED)
					INP_INFO_RUNLOCK(&V_tcbinfo);
				ti_locked = TI_UNLOCKED;

				TCPSTAT_INC(tcps_predack);

				/*
				 * "bad retransmit" recovery.
				 */
				if (tp->t_rxtshift == 1 &&
				    tp->t_flags & TF_PREVVALID &&
				    (int)(ticks - tp->t_badrxtwin) < 0) {
					cc_cong_signal(tp, th, CC_RTO_ERR);
				}

				/*
				 * Recalculate the transmit timer / rtt.
				 *
				 * Some boxes send broken timestamp replies
				 * during the SYN+ACK phase, ignore
				 * timestamps of 0 or we could calculate a
				 * huge RTT and blow up the retransmit timer.
				 */
				if ((to.to_flags & TOF_TS) != 0 &&
				    to.to_tsecr) {
					u_int t;

					t = tcp_ts_getticks() - to.to_tsecr;
					if (!tp->t_rttlow || tp->t_rttlow > t)
						tp->t_rttlow = t;
					tcp_xmit_timer(tp,
					    TCP_TS_TO_TICKS(t) + 1);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - tp->t_rtttime)
						tp->t_rttlow = ticks - tp->t_rtttime;
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime);
				}
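				/*
				 * Illustration of the two RTT paths above
				 * (a sketch): with timestamps, the sample
				 * is the age of the echoed value, e.g. a
				 * tsecr reflected ~48 ms ago yields
				 * roughly TCP_TS_TO_TICKS(48) + 1 ticks;
				 * without timestamps, only the single
				 * timed segment t_rtseq produces a sample,
				 * ticks - t_rtttime.
				 */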
				acked = BYTES_THIS_ACK(tp, th);

				/* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
				hhook_run_tcp_est_in(tp, th, &to);

				TCPSTAT_INC(tcps_rcvackpack);
				TCPSTAT_ADD(tcps_rcvackbyte, acked);
				sbdrop(&so->so_snd, acked);
				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;

				/*
				 * Let the congestion control algorithm update
				 * congestion control related information. This
				 * typically means increasing the congestion
				 * window.
				 */
				cc_ack_received(tp, th, CC_ACK);

				tp->snd_una = th->th_ack;
				/*
				 * Pull snd_wl2 up to prevent seq wrap relative
				 * to th_ack.
				 */
				tp->snd_wl2 = th->th_ack;
				tp->t_dupacks = 0;
				m_freem(m);
				ND6_HINT(tp); /* Some progress has been made. */

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
#ifdef TCPDEBUG
				if (so->so_options & SO_DEBUG)
					tcp_trace(TA_INPUT, ostate, tp,
					    (void *)tcp_saveipgen,
					    &tcp_savetcp, 0);
#endif
				if (tp->snd_una == tp->snd_max)
					tcp_timer_activate(tp, TT_REXMT, 0);
				else if (!tcp_timer_active(tp, TT_PERSIST))
					tcp_timer_activate(tp, TT_REXMT,
					    tp->t_rxtcur);
				sowwakeup(so);
				if (sbavail(&so->so_snd))
					(void) tcp_output(tp);
				goto check_delack;
			}
		} else if (th->th_ack == tp->snd_una &&
		    tlen <= sbspace(&so->so_rcv)) {
			int newsize = 0;	/* automatic sockbuf scaling */

			/*
			 * This is a pure, in-sequence data packet with
			 * nothing on the reassembly queue and we have enough
			 * buffer space to take it.
			 */
			if (ti_locked == TI_RLOCKED)
				INP_INFO_RUNLOCK(&V_tcbinfo);
			ti_locked = TI_UNLOCKED;

			/* Clean receiver SACK report if present */
			if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			TCPSTAT_INC(tcps_preddat);
			tp->rcv_nxt += tlen;
			/*
			 * Pull snd_wl1 up to prevent seq wrap relative to
			 * th_seq.
			 */
			tp->snd_wl1 = th->th_seq;
			/*
			 * Pull rcv_up up to prevent seq wrap relative to
			 * rcv_nxt.
			 */
			tp->rcv_up = tp->rcv_nxt;
			TCPSTAT_INC(tcps_rcvpack);
			TCPSTAT_ADD(tcps_rcvbyte, tlen);
			ND6_HINT(tp);	/* Some progress has been made */
#ifdef TCPDEBUG
			if (so->so_options & SO_DEBUG)
				tcp_trace(TA_INPUT, ostate, tp,
				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		/*
		 * Automatic sizing of receive socket buffer.  Often the send
		 * buffer size is not optimally adjusted to the actual network
		 * conditions at hand (delay bandwidth product).  Setting the
		 * buffer size too small limits throughput on links with high
		 * bandwidth and high delay (eg. trans-continental/oceanic links).
		 *
		 * On the receive side the socket buffer memory is only rarely
		 * used to any significant extent.  This allows us to be much
		 * more aggressive in scaling the receive socket buffer.  For
		 * the case that the buffer space is actually used to a large
		 * extent and we run out of kernel memory we can simply drop
		 * the new segments; TCP on the sender will just retransmit it
		 * later.  Setting the buffer size too big may only consume too
		 * much kernel memory if the application doesn't read() from
		 * the socket or packet loss or reordering makes use of the
		 * reassembly queue.
		 *
		 * The criteria to step up the receive buffer one notch are:
		 *  1. Application has not set receive buffer size with
		 *     SO_RCVBUF. Setting SO_RCVBUF clears SB_AUTOSIZE.
		 *  2. the number of bytes received during the time it takes
		 *     one timestamp to be reflected back to us (the RTT);
		 *  3. received bytes per RTT is within seven eighth of the
		 *     current socket buffer size;
		 *  4. receive buffer size has not hit maximal automatic size;
		 *
		 * This algorithm does one step per RTT at most and only if
		 * we receive a bulk stream w/o packet losses or reorderings.
		 * Shrinking the buffer during idle times is not necessary as
		 * it doesn't consume any memory when idle.
		 *
		 * TODO: Only step up if the application is actually serving
		 * the buffer to better manage the socket buffer resources.
		 */
			if (V_tcp_do_autorcvbuf &&
			    (to.to_flags & TOF_TS) &&
			    to.to_tsecr &&
			    (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
				if (TSTMP_GT(to.to_tsecr, tp->rfbuf_ts) &&
				    to.to_tsecr - tp->rfbuf_ts < hz) {
					if (tp->rfbuf_cnt >
					    (so->so_rcv.sb_hiwat / 8 * 7) &&
					    so->so_rcv.sb_hiwat <
					    V_tcp_autorcvbuf_max) {
						newsize =
						    min(so->so_rcv.sb_hiwat +
						    V_tcp_autorcvbuf_inc,
						    V_tcp_autorcvbuf_max);
					}
					/* Start over with next RTT. */
					tp->rfbuf_ts = 0;
					tp->rfbuf_cnt = 0;
				} else
					tp->rfbuf_cnt += tlen;	/* add up */
			}

			/* Add data to socket buffer. */
			SOCKBUF_LOCK(&so->so_rcv);
			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
				m_freem(m);
			} else {
				/*
				 * Set new socket buffer size.
				 * Give up when limit is reached.
				 */
				if (newsize)
					if (!sbreserve_locked(&so->so_rcv,
					    newsize, so, NULL))
						so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
				m_adj(m, drop_hdrlen);	/* delayed header drop */
				sbappendstream_locked(&so->so_rcv, m, 0);
			}
			/* NB: sorwakeup_locked() does an implicit unlock. */
			sorwakeup_locked(so);
			if (DELAY_ACK(tp, tlen)) {
				tp->t_flags |= TF_DELACK;
			} else {
				tp->t_flags |= TF_ACKNOW;
				tcp_output(tp);
			}
			goto check_delack;
		}
	}
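	/*
	 * Worked example of the auto-sizing step above (illustrative
	 * numbers): with sb_hiwat = 65536 and the default recvbuf_inc of
	 * 16384, receiving more than 7/8 * 65536 = 57344 bytes within one
	 * RTT grows the buffer to min(65536 + 16384, recvbuf_max) = 81920
	 * bytes; the step repeats at most once per RTT up to recvbuf_max
	 * (2 MB by default).
	 */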
	/*
	 * Calculate amount of space in receive window,
	 * and then do TCP input processing.
	 * Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	win = sbspace(&so->so_rcv);
	if (win < 0)
		win = 0;
	tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));

	/* Reset receive buffer auto scaling when not in bulk receive mode. */
	tp->rfbuf_ts = 0;
	tp->rfbuf_cnt = 0;

	switch (tp->t_state) {
	/*
	 * If the state is SYN_RECEIVED:
	 *	if seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	case TCPS_SYN_RECEIVED:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		break;
	/*
	 * If the state is SYN_SENT:
	 *	if seg contains an ACK, but not for our SYN, drop the input.
	 *	if seg contains a RST, then drop the connection.
	 *	if seg does not contain SYN, then drop it.
	 * Otherwise this is an acceptable SYN segment
	 *	initialize tp->rcv_nxt and tp->irs
	 *	if seg contains ack then advance tp->snd_una
	 *	if seg contains an ECE and ECN support is enabled, the stream
	 *	    is ECN capable.
	 *	if SYN has been acked change to ESTABLISHED else SYN_RCVD state
	 *	arrange for segment to be acked (eventually)
	 *	continue processing rest of data/controls, beginning with URG
	 */
	case TCPS_SYN_SENT:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->iss) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_UNLIMITED;
			goto dropwithreset;
		}
		if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) {
			TCP_PROBE5(connect__refused, NULL, tp,
			    mtod(m, const char *), tp, th);
			tp = tcp_drop(tp, ECONNREFUSED);
		}
		if (thflags & TH_RST)
			goto drop;
		if (!(thflags & TH_SYN))
			goto drop;

		tp->irs = th->th_seq;
		tcp_rcvseqinit(tp);
		if (thflags & TH_ACK) {
			TCPSTAT_INC(tcps_connects);
			soisconnected(so);
#ifdef MAC
			mac_socketpeer_set_from_mbuf(m, so);
#endif
			/* Do window scaling on this connection? */
			if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
			    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
				tp->rcv_scale = tp->request_r_scale;
			}
			tp->rcv_adv += imin(tp->rcv_wnd,
			    TCP_MAXWIN << tp->rcv_scale);
			tp->snd_una++;		/* SYN is acked */
			/*
			 * If there's data, delay ACK; if there's also a FIN
			 * ACKNOW will be turned on later.
			 */
			if (DELAY_ACK(tp, tlen) && tlen != 0)
				tcp_timer_activate(tp, TT_DELACK,
				    tcp_delacktime);
			else
				tp->t_flags |= TF_ACKNOW;

			if ((thflags & TH_ECE) && V_tcp_do_ecn) {
				tp->t_flags |= TF_ECN_PERMIT;
				TCPSTAT_INC(tcps_ecn_shs);
			}

			/*
			 * Received <SYN,ACK> in SYN_SENT[*] state.
			 * Transitions:
			 *	SYN_SENT  --> ESTABLISHED
			 *	SYN_SENT* --> FIN_WAIT_1
			 */
			tp->t_starttime = ticks;
			if (tp->t_flags & TF_NEEDFIN) {
				tcp_state_change(tp, TCPS_FIN_WAIT_1);
				tp->t_flags &= ~TF_NEEDFIN;
				thflags &= ~TH_SYN;
			} else {
				tcp_state_change(tp, TCPS_ESTABLISHED);
				TCP_PROBE5(connect__established, NULL, tp,
				    mtod(m, const char *), tp, th);
				cc_conn_init(tp);
				tcp_timer_activate(tp, TT_KEEP,
				    TP_KEEPIDLE(tp));
			}
		} else {
			/*
			 * Received initial SYN in SYN-SENT[*] state =>
			 * simultaneous open.
			 * If it succeeds, connection is half-synchronized.
			 * Otherwise, do 3-way handshake:
			 *        SYN-SENT -> SYN-RECEIVED
			 *        SYN-SENT* -> SYN-RECEIVED*
			 */
			tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
			tcp_timer_activate(tp, TT_REXMT, 0);
			tcp_state_change(tp, TCPS_SYN_RECEIVED);
		}
		KASSERT(ti_locked == TI_RLOCKED, ("%s: trimthenstep6: "
		    "ti_locked %d", __func__, ti_locked));
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
		INP_WLOCK_ASSERT(tp->t_inpcb);

		/*
		 * Advance th->th_seq to correspond to first data byte.
		 * If data, trim to stay within window,
		 * dropping FIN if necessary.
		 */
		th->th_seq++;
		if (tlen > tp->rcv_wnd) {
			todrop = tlen - tp->rcv_wnd;
			m_adj(m, -todrop);
			tlen = tp->rcv_wnd;
			thflags &= ~TH_FIN;
			TCPSTAT_INC(tcps_rcvpackafterwin);
			TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
		}
		tp->snd_wl1 = th->th_seq - 1;
		tp->rcv_up = th->th_seq;
		/*
		 * Client side of transaction: already sent SYN and data.
		 * If the remote host used T/TCP to validate the SYN,
		 * our data will be ACK'd; if so, enter normal data segment
		 * processing in the middle of step 5, ack processing.
		 * Otherwise, goto step 6.
		 */
		if (thflags & TH_ACK)
			goto process_ACK;

		goto step6;

	/*
	 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
	 *	do normal processing.
	 *
	 * NB: Leftover from RFC1644 T/TCP.  Cases to be reused later.
	 */
	case TCPS_LAST_ACK:
	case TCPS_CLOSING:
		break;  /* continue normal processing */
	}
	/*
	 * States other than LISTEN or SYN_SENT.
	 * First check the RST flag and sequence number since reset segments
	 * are exempt from the timestamp and connection count tests.  This
	 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
	 * below which allowed reset segments in half the sequence space
	 * to fall through and be processed (which gives forged reset
	 * segments with a random sequence number a 50 percent chance of
	 * killing a connection).
	 * Then check timestamp, if present.
	 * Then check the connection count, if present.
	 * Then check that at least some bytes of segment are within
	 * receive window.  If segment begins before rcv_nxt,
	 * drop leading data (and SYN); if nothing left, just ack.
	 */
if (thflags & TH_RST) {
    /*
     * RFC5961 Section 3.2
     *
     * - RST drops the connection only if SEG.SEQ == RCV.NXT.
     * - If the RST is merely in-window, we send a challenge ACK.
     *
     * Note: to take delayed ACKs into account, we should
     *   test against last_ack_sent instead of rcv_nxt.
     * Note 2: we handle the special case of a closed window, not
     *   covered by the RFC.
     */
    if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
        SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
        (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) {

        INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
        KASSERT(ti_locked == TI_RLOCKED,
            ("%s: TH_RST ti_locked %d, th %p tp %p",
            __func__, ti_locked, th, tp));
        KASSERT(tp->t_state != TCPS_SYN_SENT,
            ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p",
            __func__, th, tp));

        if (V_tcp_insecure_rst ||
            tp->last_ack_sent == th->th_seq) {
            TCPSTAT_INC(tcps_drops);
            /* Drop the connection. */
            switch (tp->t_state) {
            case TCPS_SYN_RECEIVED:
                so->so_error = ECONNREFUSED;
                goto close;
            case TCPS_ESTABLISHED:
            case TCPS_FIN_WAIT_1:
            case TCPS_FIN_WAIT_2:
            case TCPS_CLOSE_WAIT:
                so->so_error = ECONNRESET;
            close:
                tcp_state_change(tp, TCPS_CLOSED);
                /* FALLTHROUGH */
            default:
                tp = tcp_close(tp);
            }
        } else {
            TCPSTAT_INC(tcps_badrst);
            /* Send challenge ACK. */
            tcp_respond(tp, mtod(m, void *), th, m,
                tp->rcv_nxt, tp->snd_nxt, TH_ACK);
            tp->last_ack_sent = tp->rcv_nxt;
            m = NULL;
        }
    }
    goto drop;
}
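/*
 * For illustration (hypothetical numbers, not from the original
 * source): with tp->last_ack_sent = 1000 and tp->rcv_wnd = 5000,
 * an RST carrying seq 1000 tears the connection down, an RST with
 * seq 3000 only elicits a challenge ACK (1000 <= 3000 < 6000), and
 * an RST with seq 9000 is dropped without any response at all.
 */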
/*
 * RFC5961 Section 4.2
 * Send a challenge ACK for any SYN in synchronized state.
 */
if ((thflags & TH_SYN) && tp->t_state != TCPS_SYN_SENT) {
    KASSERT(ti_locked == TI_RLOCKED,
        ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked));
    INP_INFO_RLOCK_ASSERT(&V_tcbinfo);

    TCPSTAT_INC(tcps_badsyn);
    if (V_tcp_insecure_syn &&
        SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
        SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
        tp = tcp_drop(tp, ECONNRESET);
        rstreason = BANDLIM_UNLIMITED;
    } else {
        /* Send challenge ACK. */
        tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt,
            tp->snd_nxt, TH_ACK);
        tp->last_ack_sent = tp->rcv_nxt;
        m = NULL;
    }
    goto drop;
}
/*
 * RFC 1323 PAWS: If we have a timestamp reply on this segment
 * and it's less than ts_recent, drop it.
 */
if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
    TSTMP_LT(to.to_tsval, tp->ts_recent)) {

    /* Check to see if ts_recent is over 24 days old. */
    if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
        /*
         * Invalidate ts_recent.  If this segment updates
         * ts_recent, the age will be reset later and ts_recent
         * will get a valid value.  If it does not, setting
         * ts_recent to zero will at least satisfy the
         * requirement that zero be placed in the timestamp
         * echo reply when ts_recent isn't valid.  The
         * age isn't reset until we get a valid ts_recent
         * because we don't want out-of-order segments to be
         * dropped when ts_recent is old.
         */
        tp->ts_recent = 0;
    } else {
        TCPSTAT_INC(tcps_rcvduppack);
        TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
        TCPSTAT_INC(tcps_pawsdrop);
        if (tlen)
            goto dropafterack;
        goto drop;
    }
}
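/*
 * PAWS in one line: a segment whose timestamp value is older than the
 * newest one we have accepted (ts_recent) must be a stale duplicate,
 * unless ts_recent itself has gone stale.  For illustration, assuming
 * a 1 ms timestamp clock, TCP_PAWS_IDLE corresponds to 24 days; a
 * connection idle longer than that simply forgets ts_recent rather
 * than discarding live data.
 */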
/*
 * In the SYN-RECEIVED state, validate that the packet belongs to
 * this connection before trimming the data to fit the receive
 * window.  Check the sequence number versus IRS since we know
 * the sequence numbers haven't wrapped.  This is a partial fix
 * for the "LAND" DoS attack.
 */
if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
    rstreason = BANDLIM_RST_OPENPORT;
    goto dropwithreset;
}

todrop = tp->rcv_nxt - th->th_seq;
if (todrop > 0) {
    if (thflags & TH_SYN) {
        thflags &= ~TH_SYN;
        th->th_seq++;
        if (th->th_urp > 1)
            th->th_urp--;
        else
            thflags &= ~TH_URG;
        todrop--;
    }
    /*
     * Following if statement from Stevens, vol. 2, p. 960.
     */
    if (todrop > tlen
        || (todrop == tlen && (thflags & TH_FIN) == 0)) {
        /*
         * Any valid FIN must be to the left of the window.
         * At this point the FIN must be a duplicate or out
         * of sequence; drop it.
         */
        thflags &= ~TH_FIN;

        /*
         * Send an ACK to resynchronize and drop any data.
         * But keep on processing for RST or ACK.
         */
        tp->t_flags |= TF_ACKNOW;
        todrop = tlen;
        TCPSTAT_INC(tcps_rcvduppack);
        TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
    } else {
        TCPSTAT_INC(tcps_rcvpartduppack);
        TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
    }
    drop_hdrlen += todrop;    /* drop from the top afterwards */
    th->th_seq += todrop;
    tlen -= todrop;
    if (th->th_urp > todrop)
        th->th_urp -= todrop;
    else {
        thflags &= ~TH_URG;
        th->th_urp = 0;
    }
}
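/*
 * Leading-trim arithmetic, with hypothetical numbers: if rcv_nxt is
 * 5000 and a 1000-byte segment arrives with seq 4200, then todrop =
 * 5000 - 4200 = 800 bytes were already delivered.  The segment is
 * kept, but drop_hdrlen grows by 800, th_seq becomes 5000, and tlen
 * shrinks to 200, so only the genuinely new bytes are processed.
 */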
/*
 * If new data are received on a connection after the
 * user processes are gone, then RST the other end.
 */
if ((so->so_state & SS_NOFDREF) &&
    tp->t_state > TCPS_CLOSE_WAIT && tlen) {
    char *s;

    KASSERT(ti_locked == TI_RLOCKED, ("%s: SS_NOFDREF && "
        "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked));
    INP_INFO_RLOCK_ASSERT(&V_tcbinfo);

    if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
        log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data "
            "after socket was closed, "
            "sending RST and removing tcpcb\n",
            s, __func__, tcpstates[tp->t_state], tlen);
        free(s, M_TCPLOG);
    }
    tp = tcp_close(tp);
    TCPSTAT_INC(tcps_rcvafterclose);
    rstreason = BANDLIM_UNLIMITED;
    goto dropwithreset;
}
/*
 * If the segment ends after the window, drop trailing data
 * (and PUSH and FIN); if nothing is left, just ACK.
 */
todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
if (todrop > 0) {
    TCPSTAT_INC(tcps_rcvpackafterwin);
    if (todrop >= tlen) {
        TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
        /*
         * If the window is closed we can only take segments at
         * the window edge, and have to drop data and PUSH from
         * incoming segments.  Continue processing, but
         * remember to ack.  Otherwise, drop the segment
         * and ack.
         */
        if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
            tp->t_flags |= TF_ACKNOW;
            TCPSTAT_INC(tcps_rcvwinprobe);
        } else
            goto dropafterack;
    } else
        TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
    m_adj(m, -todrop);
    tlen -= todrop;
    thflags &= ~(TH_PUSH|TH_FIN);
}
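/*
 * Trailing-trim arithmetic, again with hypothetical numbers: with
 * rcv_nxt = 5000 and rcv_wnd = 1000, a 1200-byte segment at seq 5000
 * overruns the window by todrop = (5000 + 1200) - (5000 + 1000) = 200
 * bytes, so the final 200 bytes (and any PUSH/FIN) are cut off and
 * tlen drops to 1000.
 */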
/*
 * If the last ACK falls within this segment's sequence numbers,
 * record its timestamp.
 * NOTE:
 * 1) That the test incorporates suggestions from the latest
 *    proposal of the tcplw@cray.com list (Braden 1993/04/26).
 * 2) That updating only on newer timestamps interferes with
 *    our earlier PAWS tests, so this check should be solely
 *    predicated on the sequence space of this segment.
 * 3) That we modify the segment boundary check to be
 *        Last.ACK.Sent <= SEG.SEQ + SEG.Len
 *    instead of RFC1323's
 *        Last.ACK.Sent < SEG.SEQ + SEG.Len.
 *    This modified check allows us to overcome RFC1323's
 *    limitations as described in Stevens TCP/IP Illustrated
 *    Vol. 2 p.869.  In such cases, we can still calculate the
 *    RTT correctly when RCV.NXT == Last.ACK.Sent.
 */
if ((to.to_flags & TOF_TS) != 0 &&
    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
    ((thflags & (TH_SYN|TH_FIN)) != 0))) {
    tp->ts_recent_age = tcp_ts_getticks();
    tp->ts_recent = to.to_tsval;
}
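/*
 * The SEG.Len term above counts SYN and FIN as one octet of sequence
 * space, so for a pure ACK (tlen == 0, no SYN/FIN) the test reduces
 * to th_seq <= last_ack_sent <= th_seq: only an in-sequence ACK may
 * refresh ts_recent, which is exactly what the <= boundary buys us
 * over RFC1323's strict <.
 */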
/*
 * If the ACK bit is off:  if in SYN-RECEIVED state or the TF_NEEDSYN
 * flag is on (half-synchronized state), then queue the data for
 * later processing; else drop the segment and return.
 */
if ((thflags & TH_ACK) == 0) {
    if (tp->t_state == TCPS_SYN_RECEIVED ||
        (tp->t_flags & TF_NEEDSYN))
        goto step6;
    else if (tp->t_flags & TF_ACKNOW)
        goto dropafterack;
    else
        goto drop;
}
/*
 * Ack processing.
 */
switch (tp->t_state) {

/*
 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
 * ESTABLISHED state and continue processing.
 * The ACK was checked above.
 */
case TCPS_SYN_RECEIVED:

    TCPSTAT_INC(tcps_connects);
    /* Do window scaling? */
    if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
        (TF_RCVD_SCALE|TF_REQ_SCALE)) {
        tp->rcv_scale = tp->request_r_scale;
        tp->snd_wnd = tiwin;
    }
    /*
     * Make transitions:
     *      SYN-RECEIVED  -> ESTABLISHED
     *      SYN-RECEIVED* -> FIN-WAIT-1
     */
    tp->t_starttime = ticks;
    if (tp->t_flags & TF_NEEDFIN) {
        tcp_state_change(tp, TCPS_FIN_WAIT_1);
        tp->t_flags &= ~TF_NEEDFIN;
    } else {
        tcp_state_change(tp, TCPS_ESTABLISHED);
        TCP_PROBE5(accept__established, NULL, tp,
            mtod(m, const char *), tp, th);
        tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
    }
    /*
     * If the segment contains data or a FIN, we will call tcp_reass()
     * later; if not, do so now to pass queued data to the user.
     */
    if (tlen == 0 && (thflags & TH_FIN) == 0)
        (void) tcp_reass(tp, (struct tcphdr *)0, 0,
            (struct mbuf *)0);
    tp->snd_wl1 = th->th_seq - 1;
    /* FALLTHROUGH */
/*
 * In ESTABLISHED state: drop duplicate ACKs; ACK out-of-range
 * ACKs.  If the ack is in the range
 *      tp->snd_una < th->th_ack <= tp->snd_max
 * then advance tp->snd_una to th->th_ack and drop
 * data from the retransmission queue.  If this ACK reflects
 * more up-to-date window information, we update our window information.
 */
case TCPS_ESTABLISHED:
case TCPS_FIN_WAIT_1:
case TCPS_FIN_WAIT_2:
case TCPS_CLOSE_WAIT:
case TCPS_CLOSING:
case TCPS_LAST_ACK:
    if (SEQ_GT(th->th_ack, tp->snd_max)) {
        TCPSTAT_INC(tcps_rcvacktoomuch);
        goto dropafterack;
    }
    if ((tp->t_flags & TF_SACK_PERMIT) &&
        ((to.to_flags & TOF_SACK) ||
        !TAILQ_EMPTY(&tp->snd_holes)))
        tcp_sack_doack(tp, &to, th->th_ack);
    /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
    hhook_run_tcp_est_in(tp, th, &to);

    if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
        if (tlen == 0 && tiwin == tp->snd_wnd) {
            /*
             * If this is the first time we've seen a
             * FIN from the remote, this is not a
             * duplicate and it needs to be processed
             * normally.  This happens during a
             * simultaneous close.
             */
            if ((thflags & TH_FIN) &&
                (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
                tp->t_dupacks = 0;
                break;
            }
            TCPSTAT_INC(tcps_rcvdupack);
            /*
             * If we have outstanding data (other than
             * a window probe), this is a completely
             * duplicate ack (ie, window info didn't
             * change and FIN isn't set),
             * the ack is the biggest we've
             * seen and we've seen exactly our rexmt
             * threshold of them, assume a packet
             * has been dropped and retransmit it.
             * Kludge snd_nxt & the congestion
             * window so we send only this one
             * packet.
             *
             * We know we're losing at the current
             * window size so do congestion avoidance
             * (set ssthresh to half the current window
             * and pull our congestion window back to
             * the new ssthresh).
             *
             * Dup acks mean that packets have left the
             * network (they're now cached at the receiver)
             * so bump cwnd by the amount in the receiver
             * to keep a constant cwnd packets in the
             * network.
             *
             * When using TCP ECN, notify the peer that
             * we reduced the cwnd.
             */
            if (!tcp_timer_active(tp, TT_REXMT) ||
                th->th_ack != tp->snd_una)
                tp->t_dupacks = 0;
            else if (++tp->t_dupacks > tcprexmtthresh ||
                IN_FASTRECOVERY(tp->t_flags)) {
                cc_ack_received(tp, th, CC_DUPACK);
                if ((tp->t_flags & TF_SACK_PERMIT) &&
                    IN_FASTRECOVERY(tp->t_flags)) {
                    int awnd;

                    /*
                     * Compute the amount of data in flight first.
                     * We can inject new data into the pipe iff
                     * we have less than 1/2 the original window's
                     * worth of data in flight.
                     */
                    awnd = (tp->snd_nxt - tp->snd_fack) +
                        tp->sackhint.sack_bytes_rexmit;
                    if (awnd < tp->snd_ssthresh) {
                        tp->snd_cwnd += tp->t_maxseg;
                        if (tp->snd_cwnd > tp->snd_ssthresh)
                            tp->snd_cwnd = tp->snd_ssthresh;
                    }
                } else
                    tp->snd_cwnd += tp->t_maxseg;
                (void) tcp_output(tp);
                goto drop;
            } else if (tp->t_dupacks == tcprexmtthresh) {
                tcp_seq onxt = tp->snd_nxt;

                /*
                 * If we're doing SACK, check to
                 * see if we're already in SACK
                 * recovery.  If we're not doing SACK,
                 * check to see if we're in NewReno
                 * recovery.
                 */
                if (tp->t_flags & TF_SACK_PERMIT) {
                    if (IN_FASTRECOVERY(tp->t_flags)) {
                        tp->t_dupacks = 0;
                        break;
                    }
                } else {
                    if (SEQ_LEQ(th->th_ack,
                        tp->snd_recover)) {
                        tp->t_dupacks = 0;
                        break;
                    }
                }
                /* Congestion signal before ack. */
                cc_cong_signal(tp, th, CC_NDUPACK);
                cc_ack_received(tp, th, CC_DUPACK);
                tcp_timer_activate(tp, TT_REXMT, 0);
                tp->t_rtttime = 0;
                if (tp->t_flags & TF_SACK_PERMIT) {
                    TCPSTAT_INC(
                        tcps_sack_recovery_episode);
                    tp->sack_newdata = tp->snd_nxt;
                    tp->snd_cwnd = tp->t_maxseg;
                    (void) tcp_output(tp);
                    goto drop;
                }
                tp->snd_nxt = th->th_ack;
                tp->snd_cwnd = tp->t_maxseg;
                (void) tcp_output(tp);
                KASSERT(tp->snd_limited <= 2,
                    ("%s: tp->snd_limited too big",
                    __func__));
                tp->snd_cwnd = tp->snd_ssthresh +
                    tp->t_maxseg *
                    (tp->t_dupacks - tp->snd_limited);
                if (SEQ_GT(onxt, tp->snd_nxt))
                    tp->snd_nxt = onxt;
                goto drop;
            } else if (V_tcp_do_rfc3042) {
                u_long oldcwnd = tp->snd_cwnd;
                tcp_seq oldsndmax = tp->snd_max;
                u_int sent;
                int avail;

                cc_ack_received(tp, th, CC_DUPACK);

                KASSERT(tp->t_dupacks == 1 ||
                    tp->t_dupacks == 2,
                    ("%s: dupacks not 1 or 2",
                    __func__));
                if (tp->t_dupacks == 1)
                    tp->snd_limited = 0;
                tp->snd_cwnd =
                    (tp->snd_nxt - tp->snd_una) +
                    (tp->t_dupacks - tp->snd_limited) *
                    tp->t_maxseg;
                /*
                 * Only call tcp_output when there
                 * is new data available to be sent.
                 * Otherwise we would send pure ACKs.
                 */
                SOCKBUF_LOCK(&so->so_snd);
                avail = sbavail(&so->so_snd) -
                    (tp->snd_nxt - tp->snd_una);
                SOCKBUF_UNLOCK(&so->so_snd);
                if (avail > 0)
                    (void) tcp_output(tp);
                sent = tp->snd_max - oldsndmax;
                if (sent > tp->t_maxseg) {
                    KASSERT((tp->t_dupacks == 2 &&
                        tp->snd_limited == 0) ||
                        (sent == tp->t_maxseg + 1 &&
                        tp->t_flags & TF_SENTFIN),
                        ("%s: sent too much",
                        __func__));
                    tp->snd_limited = 2;
                } else if (sent > 0)
                    ++tp->snd_limited;
                tp->snd_cwnd = oldcwnd;
                goto drop;
            }
        } else
            tp->t_dupacks = 0;
        break;
    }
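/*
 * Fast-retransmit arithmetic in brief (illustrative numbers, with
 * snd_limited == 0): with tcprexmtthresh == 3 and t_maxseg == 1460,
 * the third duplicate ACK signals CC_NDUPACK (letting the congestion
 * control module cut ssthresh, typically to half the window),
 * retransmits one segment with cwnd forced to 1460, then inflates
 * cwnd to ssthresh + 3 * 1460, crediting the three segments that the
 * duplicate ACKs say have left the network.  Each further duplicate
 * ACK grows cwnd by one more t_maxseg until the hole is repaired.
 */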
    KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
        ("%s: th_ack <= snd_una", __func__));

    /*
     * If the congestion window was inflated to account
     * for the other side's cached packets, retract it.
     */
    if (IN_FASTRECOVERY(tp->t_flags)) {
        if (SEQ_LT(th->th_ack, tp->snd_recover)) {
            if (tp->t_flags & TF_SACK_PERMIT)
                tcp_sack_partialack(tp, th);
            else
                tcp_newreno_partial_ack(tp, th);
        } else
            cc_post_recovery(tp, th);
    }

    /*
     * If we reach this point, the ACK is not a duplicate,
     * i.e., it ACKs something we sent.
     */
    if (tp->t_flags & TF_NEEDSYN) {
        /*
         * T/TCP: Connection was half-synchronized, and our
         * SYN has been ACK'd (so the connection is now fully
         * synchronized).  Go to non-starred state,
         * increment snd_una for the ACK of the SYN, and check if
         * we can do window scaling.
         */
        tp->t_flags &= ~TF_NEEDSYN;
        tp->snd_una++;
        /* Do window scaling? */
        if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
            (TF_RCVD_SCALE|TF_REQ_SCALE)) {
            tp->rcv_scale = tp->request_r_scale;
            /* Send window already scaled. */
        }
    }
process_ACK:
    INP_WLOCK_ASSERT(tp->t_inpcb);

    acked = BYTES_THIS_ACK(tp, th);
    TCPSTAT_INC(tcps_rcvackpack);
    TCPSTAT_ADD(tcps_rcvackbyte, acked);

    /*
     * If we just performed our first retransmit, and the ACK
     * arrives within our recovery window, then it was a mistake
     * to do the retransmit in the first place.  Recover our
     * original cwnd and ssthresh, and proceed to transmit where
     * we left off.
     */
    if (tp->t_rxtshift == 1 && tp->t_flags & TF_PREVVALID &&
        (int)(ticks - tp->t_badrxtwin) < 0)
        cc_cong_signal(tp, th, CC_RTO_ERR);
    /*
     * If we have a timestamp reply, update the smoothed
     * round trip time.  If no timestamp is present but
     * the transmit timer is running and the timed sequence
     * number was acked, update the smoothed round trip time.
     * Since we now have an rtt measurement, cancel the
     * timer backoff (cf., Phil Karn's retransmit alg.).
     * Recompute the initial retransmit timer.
     *
     * Some boxes send broken timestamp replies
     * during the SYN+ACK phase; ignore
     * timestamps of 0 or we could calculate a
     * huge RTT and blow up the retransmit timer.
     */
    if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) {
        u_int t;

        t = tcp_ts_getticks() - to.to_tsecr;
        if (!tp->t_rttlow || tp->t_rttlow > t)
            tp->t_rttlow = t;
        tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1);
    } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
        if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
            tp->t_rttlow = ticks - tp->t_rtttime;
        tcp_xmit_timer(tp, ticks - tp->t_rtttime);
    }
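/*
 * RTT sampling sketch (hypothetical numbers; assumes the timestamp
 * clock behind tcp_ts_getticks() advances in milliseconds): if the
 * peer echoes a value we stamped 42 ms ago, then t =
 * tcp_ts_getticks() - to.to_tsecr = 42, and TCP_TS_TO_TICKS(t) + 1
 * converts that into at least one kernel tick for tcp_xmit_timer().
 * Without timestamps we fall back to timing a single sequence number
 * per window via t_rtseq/t_rtttime.
 */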
    /*
     * If all outstanding data is acked, stop the retransmit
     * timer and remember to restart (more output or persist).
     * If there is more data to be acked, restart the retransmit
     * timer, using the current (possibly backed-off) value.
     */
    if (th->th_ack == tp->snd_max) {
        tcp_timer_activate(tp, TT_REXMT, 0);
        needoutput = 1;
    } else if (!tcp_timer_active(tp, TT_PERSIST))
        tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);

    /*
     * If no data (only SYN) was ACK'd,
     * skip the rest of ACK processing.
     */
    if (acked == 0)
        goto step6;

    /*
     * Let the congestion control algorithm update congestion
     * control related information.  This typically means increasing
     * the congestion window.
     */
    cc_ack_received(tp, th, CC_ACK);
    SOCKBUF_LOCK(&so->so_snd);
    if (acked > sbavail(&so->so_snd)) {
        tp->snd_wnd -= sbavail(&so->so_snd);
        mfree = sbcut_locked(&so->so_snd,
            (int)sbavail(&so->so_snd));
        ourfinisacked = 1;
    } else {
        mfree = sbcut_locked(&so->so_snd, acked);
        tp->snd_wnd -= acked;
        ourfinisacked = 0;
    }
    /* NB: sowwakeup_locked() does an implicit unlock. */
    sowwakeup_locked(so);
    m_freem(mfree);
    /* Detect una wraparound. */
    if (!IN_RECOVERY(tp->t_flags) &&
        SEQ_GT(tp->snd_una, tp->snd_recover) &&
        SEQ_LEQ(th->th_ack, tp->snd_recover))
        tp->snd_recover = th->th_ack - 1;
    /* XXXLAS: Can this be moved up into cc_post_recovery? */
    if (IN_RECOVERY(tp->t_flags) &&
        SEQ_GEQ(th->th_ack, tp->snd_recover)) {
        EXIT_RECOVERY(tp->t_flags);
    }
    tp->snd_una = th->th_ack;
    if (tp->t_flags & TF_SACK_PERMIT) {
        if (SEQ_GT(tp->snd_una, tp->snd_recover))
            tp->snd_recover = tp->snd_una;
    }
    if (SEQ_LT(tp->snd_nxt, tp->snd_una))
        tp->snd_nxt = tp->snd_una;
    switch (tp->t_state) {

    /*
     * In FIN_WAIT_1 STATE, in addition to the processing
     * for the ESTABLISHED state, if our FIN is now acknowledged
     * then enter FIN_WAIT_2.
     */
    case TCPS_FIN_WAIT_1:
        if (ourfinisacked) {
            /*
             * If we can't receive any more
             * data, then the closing user can proceed.
             * Starting the timer is contrary to the
             * specification, but if we don't get a FIN
             * we'll hang forever.
             *
             * XXXjl: we should release the tp also, and
             * use a compressed state.
             */
            if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
                soisdisconnected(so);
                tcp_timer_activate(tp, TT_2MSL,
                    (tcp_fast_finwait2_recycle ?
                    tcp_finwait2_timeout :
                    TP_MAXIDLE(tp)));
            }
            tcp_state_change(tp, TCPS_FIN_WAIT_2);
        }
        break;

    /*
     * In CLOSING STATE, in addition to the processing for
     * the ESTABLISHED state, if the ACK acknowledges our FIN
     * then enter the TIME-WAIT state, otherwise ignore
     * the segment.
     */
    case TCPS_CLOSING:
        if (ourfinisacked) {
            INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
            tcp_twstart(tp);
            INP_INFO_RUNLOCK(&V_tcbinfo);
            m_freem(m);
            return;
        }
        break;

    /*
     * In LAST_ACK, we may still be waiting for data to drain
     * and/or to be acked, as well as for the ack of our FIN.
     * If our FIN is now acknowledged, delete the TCB,
     * enter the closed state and return.
     */
    case TCPS_LAST_ACK:
        if (ourfinisacked) {
            INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
            tp = tcp_close(tp);
            goto drop;
        }
        break;
    }
step6:
    INP_WLOCK_ASSERT(tp->t_inpcb);

    /*
     * Update window information.
     * Don't look at the window if there is no ACK:
     * TACs send garbage on the first SYN.
     */
    if ((thflags & TH_ACK) &&
        (SEQ_LT(tp->snd_wl1, th->th_seq) ||
        (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
        (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
        /* keep track of pure window updates */
        if (tlen == 0 &&
            tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
            TCPSTAT_INC(tcps_rcvwinupd);
        tp->snd_wnd = tiwin;
        tp->snd_wl1 = th->th_seq;
        tp->snd_wl2 = th->th_ack;
        if (tp->snd_wnd > tp->max_sndwnd)
            tp->max_sndwnd = tp->snd_wnd;
        needoutput = 1;
    }
    /*
     * Process segments with URG.
     */
    if ((thflags & TH_URG) && th->th_urp &&
        TCPS_HAVERCVDFIN(tp->t_state) == 0) {
        /*
         * This is a kludge, but if we receive and accept
         * random urgent pointers, we'll crash in
         * soreceive.  It's hard to imagine someone
         * actually wanting to send this much urgent data.
         */
        SOCKBUF_LOCK(&so->so_rcv);
        if (th->th_urp + sbavail(&so->so_rcv) > sb_max) {
            th->th_urp = 0;            /* XXX */
            thflags &= ~TH_URG;        /* XXX */
            SOCKBUF_UNLOCK(&so->so_rcv);    /* XXX */
            goto dodata;            /* XXX */
        }
        /*
         * If this segment advances the known urgent pointer,
         * then mark the data stream.  This should not happen
         * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
         * a FIN has been received from the remote side.
         * In these states we ignore the URG.
         *
         * According to RFC961 (Assigned Protocols),
         * the urgent pointer points to the last octet
         * of urgent data.  We continue, however,
         * to consider it to indicate the first octet
         * of data past the urgent section as the original
         * spec states (in one of two places).
         */
        if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
            tp->rcv_up = th->th_seq + th->th_urp;
            so->so_oobmark = sbavail(&so->so_rcv) +
                (tp->rcv_up - tp->rcv_nxt) - 1;
            if (so->so_oobmark == 0)
                so->so_rcv.sb_state |= SBS_RCVATMARK;
            sohasoutofband(so);
            tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
        }
        SOCKBUF_UNLOCK(&so->so_rcv);
        /*
         * Remove out-of-band data so it doesn't get presented to the
         * user.  This can happen independent of advancing the URG
         * pointer, but if two URG's are pending at once, some
         * out-of-band data may creep in... ick.
         */
        if (th->th_urp <= (u_long)tlen &&
            !(so->so_options & SO_OOBINLINE)) {
            /* hdr drop is delayed */
            tcp_pulloutofband(so, th, m, drop_hdrlen);
        }
    } else {
        /*
         * If no out-of-band data is expected,
         * pull the receive urgent pointer along
         * with the receive window.
         */
        if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
            tp->rcv_up = tp->rcv_nxt;
    }
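/*
 * so_oobmark arithmetic, with hypothetical numbers: if 100 bytes are
 * already queued in so_rcv, rcv_nxt = 5000 and the urgent pointer
 * advances rcv_up to 5010, the mark lands at 100 + (5010 - 5000) - 1
 * = 109 bytes into the stream, i.e. just before the out-of-band byte
 * that tcp_pulloutofband() may then extract.
 */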
dodata:                            /* XXX */
    INP_WLOCK_ASSERT(tp->t_inpcb);

    /*
     * Process the segment text, merging it into the TCP sequencing
     * queue, and arranging for acknowledgment of receipt if
     * necessary.  This process logically involves adjusting
     * tp->rcv_wnd as data is presented to the user (this happens in
     * tcp_usrreq.c, case PRU_RCVD).  If a FIN has already been
     * received on this connection then we just ignore the text.
     */
    if ((tlen || (thflags & TH_FIN)) &&
        TCPS_HAVERCVDFIN(tp->t_state) == 0) {
        tcp_seq save_start = th->th_seq;
        m_adj(m, drop_hdrlen);    /* delayed header drop */
        /*
         * Insert the segment which includes th into the TCP
         * reassembly queue with control block tp.  Set thflags to
         * whether reassembly now includes a segment with FIN.  This
         * handles the common case inline (segment is the next to be
         * received on an established connection, and the queue is
         * empty), avoiding linkage into and removal from the queue
         * and repetition of various conversions.
         * Set DELACK for segments received in order, but ack
         * immediately when segments are out of order (so
         * fast retransmit can work).
         */
        if (th->th_seq == tp->rcv_nxt &&
            LIST_EMPTY(&tp->t_segq) &&
            TCPS_HAVEESTABLISHED(tp->t_state)) {
            if (DELAY_ACK(tp, tlen))
                tp->t_flags |= TF_DELACK;
            else
                tp->t_flags |= TF_ACKNOW;
            tp->rcv_nxt += tlen;
            thflags = th->th_flags & TH_FIN;
            TCPSTAT_INC(tcps_rcvpack);
            TCPSTAT_ADD(tcps_rcvbyte, tlen);
            SOCKBUF_LOCK(&so->so_rcv);
            if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
                m_freem(m);
            else
                sbappendstream_locked(&so->so_rcv, m, 0);
            /* NB: sorwakeup_locked() does an implicit unlock. */
            sorwakeup_locked(so);
        } else {
            /*
             * XXX: Due to the header drop above "th" is
             * theoretically invalid by now.  Fortunately
             * m_adj() doesn't actually free any mbufs
             * when trimming from the head.
             */
            thflags = tcp_reass(tp, th, &tlen, m);
            tp->t_flags |= TF_ACKNOW;
        }
        if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT))
            tcp_update_sack_list(tp, save_start, save_start + tlen);
        /*
         * Note the amount of data that the peer has sent into
         * our window, in order to estimate the sender's
         * buffer size.
         */
        if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
            len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
        else
            len = so->so_rcv.sb_hiwat;
    } else {
        m_freem(m);
        thflags &= ~TH_FIN;
    }
    /*
     * If a FIN is received, ACK the FIN and let the user know
     * that the connection is closing.
     */
    if (thflags & TH_FIN) {
        if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
            socantrcvmore(so);
            /*
             * If the connection is half-synchronized
             * (ie NEEDSYN flag on) then delay the ACK,
             * so it may be piggybacked when the SYN is sent.
             * Otherwise, since we received a FIN then no
             * more input can be expected, send the ACK now.
             */
            if (tp->t_flags & TF_NEEDSYN)
                tp->t_flags |= TF_DELACK;
            else
                tp->t_flags |= TF_ACKNOW;
            tp->rcv_nxt++;
        }
        switch (tp->t_state) {

        /*
         * In SYN_RECEIVED and ESTABLISHED STATES
         * enter the CLOSE_WAIT state.
         */
        case TCPS_SYN_RECEIVED:
            tp->t_starttime = ticks;
            /* FALLTHROUGH */
        case TCPS_ESTABLISHED:
            tcp_state_change(tp, TCPS_CLOSE_WAIT);
            break;

        /*
         * If still in FIN_WAIT_1 STATE, our FIN has not been acked so
         * enter the CLOSING state.
         */
        case TCPS_FIN_WAIT_1:
            tcp_state_change(tp, TCPS_CLOSING);
            break;

        /*
         * In FIN_WAIT_2 state enter the TIME_WAIT state,
         * starting the time-wait timer, turning off the other
         * standard timers.
         */
        case TCPS_FIN_WAIT_2:
            INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
            KASSERT(ti_locked == TI_RLOCKED, ("%s: dodata "
                "TCP_FIN_WAIT_2 ti_locked: %d", __func__,
                ti_locked));

            tcp_twstart(tp);
            INP_INFO_RUNLOCK(&V_tcbinfo);
            return;
        }
    }
    if (ti_locked == TI_RLOCKED)
        INP_INFO_RUNLOCK(&V_tcbinfo);
    ti_locked = TI_UNLOCKED;
#ifdef TCPDEBUG
    if (so->so_options & SO_DEBUG)
        tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
            &tcp_savetcp, 0);
#endif

    /*
     * Return any desired output.
     */
    if (needoutput || (tp->t_flags & TF_ACKNOW))
        (void) tcp_output(tp);

check_delack:
    KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d",
        __func__, ti_locked));
    INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
    INP_WLOCK_ASSERT(tp->t_inpcb);

    if (tp->t_flags & TF_DELACK) {
        tp->t_flags &= ~TF_DELACK;
        tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
    }
    INP_WUNLOCK(tp->t_inpcb);
    return;
dropafterack:
    /*
     * Generate an ACK dropping the incoming segment if it occupies
     * sequence space, where the ACK reflects our state.
     *
     * We can now skip the test for the RST flag since all
     * paths to this code happen after packets containing
     * RST have been dropped.
     *
     * In the SYN-RECEIVED state, don't send an ACK unless the
     * segment we received passes the SYN-RECEIVED ACK test.
     * If it fails send a RST.  This breaks the loop in the
     * "LAND" DoS attack, and also prevents an ACK storm
     * between two listening ports that have been sent forged
     * SYN segments, each with the source address of the other.
     */
    if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
        (SEQ_GT(tp->snd_una, th->th_ack) ||
        SEQ_GT(th->th_ack, tp->snd_max))) {
        rstreason = BANDLIM_RST_OPENPORT;
        goto dropwithreset;
    }
#ifdef TCPDEBUG
    if (so->so_options & SO_DEBUG)
        tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
            &tcp_savetcp, 0);
#endif
    if (ti_locked == TI_RLOCKED)
        INP_INFO_RUNLOCK(&V_tcbinfo);
    ti_locked = TI_UNLOCKED;

    tp->t_flags |= TF_ACKNOW;
    (void) tcp_output(tp);
    INP_WUNLOCK(tp->t_inpcb);
    m_freem(m);
    return;

dropwithreset:
    if (ti_locked == TI_RLOCKED)
        INP_INFO_RUNLOCK(&V_tcbinfo);
    ti_locked = TI_UNLOCKED;

    if (tp != NULL) {
        tcp_dropwithreset(m, th, tp, tlen, rstreason);
        INP_WUNLOCK(tp->t_inpcb);
    } else
        tcp_dropwithreset(m, th, NULL, tlen, rstreason);
    m = NULL;    /* mbuf chain got consumed. */
    goto drop;

drop:
    if (ti_locked == TI_RLOCKED) {
        INP_INFO_RUNLOCK(&V_tcbinfo);
        ti_locked = TI_UNLOCKED;
    }
#ifdef INVARIANTS
    else
        INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
#endif

    /*
     * Drop space held by the incoming segment and return.
     */
#ifdef TCPDEBUG
    if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
        tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
            &tcp_savetcp, 0);
#endif
    if (tp != NULL)
        INP_WUNLOCK(tp->t_inpcb);
    m_freem(m);
/*
 * Issue a RST and make the ACK acceptable to the originator of the
 * segment.  The mbuf must still include the original packet header.
 * tp may be NULL.
 */
static void
tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
    int tlen, int rstreason)
{
#ifdef INET
    struct ip *ip;
#endif
#ifdef INET6
    struct ip6_hdr *ip6;
#endif

    if (tp != NULL) {
        INP_WLOCK_ASSERT(tp->t_inpcb);
    }

    /* Don't bother if the destination was broadcast/multicast. */
    if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
        goto drop;
#ifdef INET6
    if (mtod(m, struct ip *)->ip_v == 6) {
        ip6 = mtod(m, struct ip6_hdr *);
        if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
            IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
            goto drop;
        /* The IPv6 anycast check is done at tcp6_input(). */
    }
#endif
#if defined(INET) && defined(INET6)
    else
#endif
#ifdef INET
    {
        ip = mtod(m, struct ip *);
        if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
            IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
            ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
            in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
            goto drop;
    }
#endif

    /* Perform bandwidth limiting. */
    if (badport_bandlim(rstreason) < 0)
        goto drop;

    /* tcp_respond consumes the mbuf chain. */
    if (th->th_flags & TH_ACK) {
        tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
            th->th_ack, TH_RST);
    } else {
        if (th->th_flags & TH_SYN)
            tlen++;
        tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
            (tcp_seq)0, TH_RST|TH_ACK);
    }
    return;
drop:
    m_freem(m);
}
/*
 * Parse TCP options and place them in tcpopt.
 */
static void
tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
{
    int opt, optlen;

    to->to_flags = 0;
    for (; cnt > 0; cnt -= optlen, cp += optlen) {
        opt = cp[0];
        if (opt == TCPOPT_EOL)
            break;
        if (opt == TCPOPT_NOP)
            optlen = 1;
        else {
            if (cnt < 2)
                break;
            optlen = cp[1];
            if (optlen < 2 || optlen > cnt)
                break;
        }
        switch (opt) {
        case TCPOPT_MAXSEG:
            if (optlen != TCPOLEN_MAXSEG)
                continue;
            if (!(flags & TO_SYN))
                continue;
            to->to_flags |= TOF_MSS;
            bcopy((char *)cp + 2,
                (char *)&to->to_mss, sizeof(to->to_mss));
            to->to_mss = ntohs(to->to_mss);
            break;
        case TCPOPT_WINDOW:
            if (optlen != TCPOLEN_WINDOW)
                continue;
            if (!(flags & TO_SYN))
                continue;
            to->to_flags |= TOF_SCALE;
            to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
            break;
        case TCPOPT_TIMESTAMP:
            if (optlen != TCPOLEN_TIMESTAMP)
                continue;
            to->to_flags |= TOF_TS;
            bcopy((char *)cp + 2,
                (char *)&to->to_tsval, sizeof(to->to_tsval));
            to->to_tsval = ntohl(to->to_tsval);
            bcopy((char *)cp + 6,
                (char *)&to->to_tsecr, sizeof(to->to_tsecr));
            to->to_tsecr = ntohl(to->to_tsecr);
            break;
#ifdef TCP_SIGNATURE
        /*
         * XXX In order to reply to a host which has set the
         * TCP_SIGNATURE option in its initial SYN, we have to
         * record the fact that the option was observed here
         * for the syncache code to perform the correct response.
         */
        case TCPOPT_SIGNATURE:
            if (optlen != TCPOLEN_SIGNATURE)
                continue;
            to->to_flags |= TOF_SIGNATURE;
            to->to_signature = cp + 2;
            break;
#endif
        case TCPOPT_SACK_PERMITTED:
            if (optlen != TCPOLEN_SACK_PERMITTED)
                continue;
            if (!(flags & TO_SYN))
                continue;
            to->to_flags |= TOF_SACKPERM;
            break;
        case TCPOPT_SACK:
            if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
                continue;
            if (flags & TO_SYN)
                continue;
            to->to_flags |= TOF_SACK;
            to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
            to->to_sacks = cp + 2;
            TCPSTAT_INC(tcps_sack_rcv_blocks);
            break;
        default:
            continue;
        }
    }
}
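/*
 * Option wire format, for reference when reading the parser above:
 * every option except EOL/NOP is TLV-encoded.  For example, a SACK
 * block list acknowledging 2000-3000 and 4000-4500 arrives as
 * (values illustrative):
 *
 *   kind=5, len=18, 0x000007d0 0x00000bb8 0x00000fa0 0x00001194
 *
 * so optlen - 2 = 16 is an exact multiple of TCPOLEN_SACK (8) and
 * to_nsacks = 2, with to_sacks left pointing at the raw,
 * network-order block list.
 */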
/*
 * Pull the out-of-band byte out of a segment so
 * it doesn't appear in the user's data queue.
 * It is still reflected in the segment length for
 * sequencing purposes.
 */
static void
tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
    int off)
{
    int cnt = off + th->th_urp - 1;

    while (cnt >= 0) {
        if (m->m_len > cnt) {
            char *cp = mtod(m, caddr_t) + cnt;
            struct tcpcb *tp = sototcpcb(so);

            INP_WLOCK_ASSERT(tp->t_inpcb);

            tp->t_iobc = *cp;
            tp->t_oobflags |= TCPOOB_HAVEDATA;
            bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
            m->m_len--;
            if (m->m_flags & M_PKTHDR)
                m->m_pkthdr.len--;
            return;
        }
        cnt -= m->m_len;
        m = m->m_next;
        if (m == NULL)
            break;
    }
    panic("tcp_pulloutofband");
}
/*
 * Collect a new round-trip time estimate
 * and update averages and the current timeout.
 */
static void
tcp_xmit_timer(struct tcpcb *tp, int rtt)
{
    int delta;

    INP_WLOCK_ASSERT(tp->t_inpcb);

    TCPSTAT_INC(tcps_rttupdated);
    tp->t_rttupdated++;
    if (tp->t_srtt != 0) {
        /*
         * srtt is stored as fixed point with 5 bits after the
         * binary point (i.e., scaled by 32).  The following magic
         * is equivalent to the smoothing algorithm in rfc793 with
         * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
         * point).  Adjust rtt to origin 0.
         */
        delta = ((rtt - 1) << TCP_DELTA_SHIFT)
            - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));

        if ((tp->t_srtt += delta) <= 0)
            tp->t_srtt = 1;

        /*
         * We accumulate a smoothed rtt variance (actually, a
         * smoothed mean difference), then set the retransmit
         * timer to smoothed rtt + 4 times the smoothed variance.
         * rttvar is stored as fixed point with 4 bits after the
         * binary point (scaled by 16).  The following is
         * equivalent to rfc793 smoothing with an alpha of .75
         * (rttvar = rttvar*3/4 + |delta| / 4).  This replaces
         * rfc793's wired-in beta.
         */
        if (delta < 0)
            delta = -delta;
        delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
        if ((tp->t_rttvar += delta) <= 0)
            tp->t_rttvar = 1;
        if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
            tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
    } else {
        /*
         * No rtt measurement yet - use the unsmoothed rtt.
         * Set the variance to half the rtt (so our first
         * retransmit happens at 3*rtt).
         */
        tp->t_srtt = rtt << TCP_RTT_SHIFT;
        tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
        tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
    }
    tp->t_rtttime = 0;
    tp->t_rxtshift = 0;

    /*
     * The retransmit should happen at rtt + 4 * rttvar.
     * Because of the way we do the smoothing, srtt and rttvar
     * will each average +1/2 tick of bias.  When we compute
     * the retransmit timer, we want 1/2 tick of rounding and
     * 1 extra tick because of the +-1/2 tick uncertainty in the
     * firing of the timer.  The bias will give us exactly the
     * 1.5 tick we need.  But, because the bias is
     * statistical, we have to test that we don't drop below
     * the minimum feasible timer (which is 2 ticks).
     */
    TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
        max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);

    /*
     * We received an ack for a packet that wasn't retransmitted;
     * it is probably safe to discard any error indications we've
     * received recently.  This isn't quite right, but close enough
     * for now (a route might have failed after we sent a segment,
     * and the return path might not be symmetrical).
     */
    tp->t_softerror = 0;
}
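/*
 * Worked fixed-point example (hypothetical sample; assumes the stock
 * TCP_RTT_SHIFT = 5 and TCP_DELTA_SHIFT = 2): with a smoothed RTT of
 * 8 ticks, t_srtt holds 8 << 5 = 256.  A new 16-tick sample gives
 *
 *   delta  = ((16 - 1) << 2) - (256 >> 3) = 60 - 32 = 28
 *   t_srtt = 256 + 28 = 284, i.e. 284 / 32 = 8.875 ticks
 *
 * which is exactly srtt + (rtt - 1 - srtt) / 8, the rfc793 smoothing
 * with alpha 7/8.
 */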
/*
 * Determine a reasonable value for maxseg size.
 * If the route is known, check the route for an mtu.
 * If none, use an mss that can be handled on the outgoing interface
 * without forcing IP to fragment.  If no route is found, route has no mtu,
 * or the destination isn't local, use a default, hopefully conservative
 * size (usually 512 or the default IP max size, but no more than the mtu
 * of the interface), as we can't discover anything about intervening
 * gateways or networks.  We also initialize the congestion/slow start
 * window to be a single segment if the destination isn't local.
 * While looking at the routing entry, we also initialize other path-dependent
 * parameters from pre-set or cached values in the routing entry.
 *
 * Also take into account the space needed for options that we
 * send regularly.  Make maxseg shorter by that amount to assure
 * that we can send maxseg amount of data even when the options
 * are present.  Store the upper limit of the length of options plus
 * data in maxopd.
 *
 * NOTE that this routine is only called when we process an incoming
 * segment, or an ICMP need fragmentation datagram.  Outgoing SYN/ACK MSS
 * settings are handled in tcp_mssopt().
 */
void
tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer,
    struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap)
{
    int mss = 0;
    u_long maxmtu = 0;
    struct inpcb *inp = tp->t_inpcb;
    struct hc_metrics_lite metrics;
    int origoffer;
#ifdef INET6
    int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
    size_t min_protoh = isipv6 ?
        sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
        sizeof (struct tcpiphdr);
#else
    const size_t min_protoh = sizeof(struct tcpiphdr);
#endif
    INP_WLOCK_ASSERT(tp->t_inpcb);

    if (mtuoffer != -1) {
        KASSERT(offer == -1, ("%s: conflict", __func__));
        offer = mtuoffer - min_protoh;
    }
    origoffer = offer;

    /* Initialize. */
#ifdef INET6
    if (isipv6) {
        maxmtu = tcp_maxmtu6(&inp->inp_inc, cap);
        tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt;
    }
#endif
#if defined(INET) && defined(INET6)
    else
#endif
#ifdef INET
    {
        maxmtu = tcp_maxmtu(&inp->inp_inc, cap);
        tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt;
    }
#endif

    /*
     * No route to the sender: stay with the default mss and return.
     */
    if (maxmtu == 0) {
        /*
         * In case we return early we need to initialize the metrics
         * to a defined state as tcp_hc_get() would do for us
         * if there was no cache hit.
         */
        if (metricptr != NULL)
            bzero(metricptr, sizeof(struct hc_metrics_lite));
        return;
    }

    /* What have we got? */
    switch (offer) {
    case 0:
        /*
         * Offer == 0 means that there was no MSS on the SYN
         * segment; in this case we use tcp_mssdflt as
         * already assigned to t_maxopd above.
         */
        offer = tp->t_maxopd;
        break;

    case -1:
        /*
         * Offer == -1 means that we didn't receive a SYN yet.
         */
        /* FALLTHROUGH */

    default:
        /*
         * Prevent a DoS attack with too small an MSS.  Round up
         * to at least minmss.
         */
        offer = max(offer, V_tcp_minmss);
    }
    /*
     * rmx information is now retrieved from tcp_hostcache.
     */
    tcp_hc_get(&inp->inp_inc, &metrics);
    if (metricptr != NULL)
        bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite));

    /*
     * If there's a discovered mtu in the tcp hostcache, use it.
     * Else, use the link mtu.
     */
    if (metrics.rmx_mtu)
        mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
    else {
#ifdef INET6
        if (isipv6) {
            mss = maxmtu - min_protoh;
            if (!V_path_mtu_discovery &&
                !in6_localaddr(&inp->in6p_faddr))
                mss = min(mss, V_tcp_v6mssdflt);
        }
#endif
#if defined(INET) && defined(INET6)
        else
#endif
#ifdef INET
        {
            mss = maxmtu - min_protoh;
            if (!V_path_mtu_discovery &&
                !in_localaddr(inp->inp_faddr))
                mss = min(mss, V_tcp_mssdflt);
        }
#endif
        /*
         * XXX - The above conditional (mss = maxmtu - min_protoh)
         * probably violates the TCP spec.
         * The problem is that, since we don't know the
         * other end's MSS, we are supposed to use a conservative
         * default.  But, if we do that, then MTU discovery will
         * never actually take place, because the conservative
         * default is much less than the MTUs typically seen
         * on the Internet today.  For the moment, we'll sweep
         * this under the carpet.
         *
         * The conservative default might not actually be a problem
         * if the only case this occurs is when sending an initial
         * SYN with options and data to a host we've never talked
         * to before.  Then, they will reply with an MSS value which
         * will get recorded and the new parameters should get
         * recomputed.  For Further Study.
         */
    }
    mss = min(mss, offer);

    /*
     * Sanity check: make sure that maxopd will be large
     * enough to allow some data on segments even if all
     * the option space is used (40 bytes).  Otherwise
     * funny things may happen in tcp_output.
     */
    if (mss < 48)
        mss = 48;

    /*
     * maxopd stores the maximum length of data AND options
     * in a segment; maxseg is the amount of data in a normal
     * segment.  We need to store this value (maxopd) apart
     * from maxseg, because now every segment carries options
     * and thus we normally have somewhat less data in segments.
     */
    tp->t_maxopd = mss;

    /*
     * origoffer == -1 indicates that no segments were received yet.
     * In this case we just guess.
     */
    if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
        (origoffer == -1 ||
        (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
        mss -= TCPOLEN_TSTAMP_APPA;

    tp->t_maxseg = mss;
}
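/*
 * MSS arithmetic for the common case (illustrative): an Ethernet path
 * MTU of 1500 less min_protoh (20 + 20 = 40 bytes of IPv4 + TCP
 * headers) yields mss = 1460, stored in t_maxopd.  If both sides
 * negotiated RFC 1323 timestamps, the per-segment option cost of
 * TCPOLEN_TSTAMP_APPA (12 bytes, NOP-padded) brings t_maxseg down to
 * 1448 bytes of actual data.
 */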
void
tcp_mss(struct tcpcb *tp, int offer)
{
    int mss;
    u_long bufsize;
    struct inpcb *inp;
    struct socket *so;
    struct hc_metrics_lite metrics;
    struct tcp_ifcap cap;

    KASSERT(tp != NULL, ("%s: tp == NULL", __func__));

    bzero(&cap, sizeof(cap));
    tcp_mss_update(tp, offer, -1, &metrics, &cap);

    mss = tp->t_maxseg;
    inp = tp->t_inpcb;

    /*
     * If there's a pipesize, change the socket buffer to that size;
     * don't change it if sb_hiwat differs from the default (then it
     * has been changed on purpose with setsockopt).
     * Make the socket buffers an integral number of mss units;
     * if the mss is larger than the socket buffer, decrease the mss.
     */
    so = inp->inp_socket;
    SOCKBUF_LOCK(&so->so_snd);
    if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
        bufsize = metrics.rmx_sendpipe;
    else
        bufsize = so->so_snd.sb_hiwat;
    if (bufsize < mss)
        mss = bufsize;
    else {
        bufsize = roundup(bufsize, mss);
        if (bufsize > sb_max)
            bufsize = sb_max;
        if (bufsize > so->so_snd.sb_hiwat)
            (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
    }
    SOCKBUF_UNLOCK(&so->so_snd);
    tp->t_maxseg = mss;
    SOCKBUF_LOCK(&so->so_rcv);
    if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
        bufsize = metrics.rmx_recvpipe;
    else
        bufsize = so->so_rcv.sb_hiwat;
    if (bufsize > mss) {
        bufsize = roundup(bufsize, mss);
        if (bufsize > sb_max)
            bufsize = sb_max;
        if (bufsize > so->so_rcv.sb_hiwat)
            (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
    }
    SOCKBUF_UNLOCK(&so->so_rcv);

    /* Check the interface for TSO capabilities. */
    if (cap.ifcap & CSUM_TSO) {
        tp->t_flags |= TF_TSO;
        tp->t_tsomax = cap.tsomax;
        tp->t_tsomaxsegcount = cap.tsomaxsegcount;
        tp->t_tsomaxsegsize = cap.tsomaxsegsize;
    }
}
/*
 * Determine the MSS option to send on an outgoing SYN.
 */
int
tcp_mssopt(struct in_conninfo *inc)
{
    int mss = 0;
    u_long maxmtu = 0;
    u_long thcmtu = 0;
    size_t min_protoh;

    KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));

#ifdef INET6
    if (inc->inc_flags & INC_ISIPV6) {
        mss = V_tcp_v6mssdflt;
        maxmtu = tcp_maxmtu6(inc, NULL);
        min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
    }
#endif
#if defined(INET) && defined(INET6)
    else
#endif
#ifdef INET
    {
        mss = V_tcp_mssdflt;
        maxmtu = tcp_maxmtu(inc, NULL);
        min_protoh = sizeof(struct tcpiphdr);
    }
#endif
#if defined(INET6) || defined(INET)
    thcmtu = tcp_hc_getmtu(inc);    /* IPv4 and IPv6 */
#endif

    if (maxmtu && thcmtu)
        mss = min(maxmtu, thcmtu) - min_protoh;
    else if (maxmtu || thcmtu)
        mss = max(maxmtu, thcmtu) - min_protoh;

    return (mss);
}
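/*
 * The min()/max() pair above is subtle enough to deserve numbers
 * (illustrative): with a link MTU of 9000 but a hostcache-discovered
 * path MTU of 1500, we advertise min(9000, 1500) - 40 = 1460 so the
 * peer never builds segments the path cannot carry.  If only one of
 * the two is known we take max(), i.e. the only real measurement we
 * have, in preference to the conservative default mss.
 */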
/*
 * When a partial ack arrives, force the retransmission of the
 * next unacknowledged segment.  Do not clear tp->t_dupacks.
 * By setting snd_nxt to th_ack, this forces the retransmission timer
 * to be started again.
 */
static void
tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
{
    tcp_seq onxt = tp->snd_nxt;
    u_long ocwnd = tp->snd_cwnd;

    INP_WLOCK_ASSERT(tp->t_inpcb);

    tcp_timer_activate(tp, TT_REXMT, 0);
    tp->t_rtttime = 0;
    tp->snd_nxt = th->th_ack;
    /*
     * Set snd_cwnd to one segment beyond the acknowledged offset.
     * (tp->snd_una has not yet been updated when this function is called.)
     */
    tp->snd_cwnd = tp->t_maxseg + BYTES_THIS_ACK(tp, th);
    tp->t_flags |= TF_ACKNOW;
    (void) tcp_output(tp);
    tp->snd_cwnd = ocwnd;
    if (SEQ_GT(onxt, tp->snd_nxt))
        tp->snd_nxt = onxt;
    /*
     * Partial window deflation.  Relies on the fact that tp->snd_una
     * has not been updated yet.
     */
    if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
        tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
    else
        tp->snd_cwnd = 0;
    tp->snd_cwnd += tp->t_maxseg;
}
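/*
 * Partial-ack example (hypothetical numbers): in NewReno recovery
 * with cwnd = 10 * 1460 and an ack covering 2920 new bytes, the
 * function retransmits the next hole with a temporary cwnd of
 * 1460 + 2920, restores ocwnd, then deflates it to
 * 10 * 1460 - 2920 + 1460: the two acked segments leave the pipe and
 * one fresh segment is allowed in, keeping the amount in flight
 * roughly constant until recovery completes.
 */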