/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007-2008,2010
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
 * James Healy and David Hayes, made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ipfw.h"		/* for ipfw_fwd */
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <net/route.h>

#define TCPSTATES		/* for logging */
#include <netinet/cc.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_syncache.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */
#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif /* IPSEC */

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>
const int tcprexmtthresh = 3;
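/* (Three duplicate ACKs are the standard fast-retransmit trigger.) */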
VNET_DEFINE(struct tcpstat, tcpstat);
SYSCTL_VNET_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
    &VNET_NAME(tcpstat), tcpstat,
    "TCP statistics (struct tcpstat, netinet/tcp_var.h)");

int tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0,
    "Log all incoming TCP segments to closed ports");
VNET_DEFINE(int, blackhole) = 0;
#define	V_blackhole		VNET(blackhole)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
    &VNET_NAME(blackhole), 0,
    "Do not send RST on segments to closed ports");

VNET_DEFINE(int, tcp_delack_enabled) = 1;
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
    &VNET_NAME(tcp_delack_enabled), 0,
    "Delay ACK to try and piggyback it onto a data packet");

VNET_DEFINE(int, drop_synfin) = 0;
#define	V_drop_synfin		VNET(drop_synfin)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
    &VNET_NAME(drop_synfin), 0,
    "Drop TCP packets with SYN+FIN set");

VNET_DEFINE(int, tcp_do_rfc3042) = 1;
#define	V_tcp_do_rfc3042	VNET(tcp_do_rfc3042)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3042), 0,
    "Enable RFC 3042 (Limited Transmit)");

VNET_DEFINE(int, tcp_do_rfc3390) = 1;
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3390), 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

VNET_DEFINE(int, tcp_do_rfc3465) = 1;
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3465), 0,
    "Enable RFC 3465 (Appropriate Byte Counting)");

VNET_DEFINE(int, tcp_abc_l_var) = 2;
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_RW,
    &VNET_NAME(tcp_abc_l_var), 2,
    "Cap the max cwnd increment during slow-start to this number of segments");
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");

VNET_DEFINE(int, tcp_do_ecn) = 0;
SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_RW,
    &VNET_NAME(tcp_do_ecn), 0,
    "TCP ECN support");

VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_RW,
    &VNET_NAME(tcp_ecn_maxretries), 0,
    "Max retries before giving up on ECN");
VNET_DEFINE(int, tcp_insecure_rst) = 0;
#define	V_tcp_insecure_rst	VNET(tcp_insecure_rst)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_rst), 0,
    "Follow the old (insecure) criteria for accepting RST packets");

VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
#define	V_tcp_do_autorcvbuf	VNET(tcp_do_autorcvbuf)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
    &VNET_NAME(tcp_do_autorcvbuf), 0,
    "Enable automatic receive buffer sizing");

VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024;
#define	V_tcp_autorcvbuf_inc	VNET(tcp_autorcvbuf_inc)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_inc), 0,
    "Incrementor step size of automatic receive buffer");

VNET_DEFINE(int, tcp_autorcvbuf_max) = 256*1024;
#define	V_tcp_autorcvbuf_max	VNET(tcp_autorcvbuf_max)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_max), 0,
    "Max size of automatic receive buffer");

int	tcp_read_locking = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, read_locking, CTLFLAG_RW,
    &tcp_read_locking, 0, "Enable read locking strategy");
VNET_DEFINE(struct inpcbhead, tcb);
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
VNET_DEFINE(struct inpcbinfo, tcbinfo);
static void	tcp_dooptions(struct tcpopt *, u_char *, int, int);
static void	tcp_do_segment(struct mbuf *, struct tcphdr *,
		    struct socket *, struct tcpcb *, int, int, uint8_t,
		    int);
static void	tcp_dropwithreset(struct mbuf *, struct tcphdr *,
		    struct tcpcb *, int, int);
static void	tcp_pulloutofband(struct socket *,
		    struct tcphdr *, struct mbuf *, int);
static void	tcp_xmit_timer(struct tcpcb *, int);
static void	tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
static void inline	cc_ack_received(struct tcpcb *tp, struct tcphdr *th,
			    uint16_t type);
static void inline	cc_conn_init(struct tcpcb *tp);
static void inline	cc_post_recovery(struct tcpcb *tp, struct tcphdr *th);
static void inline	tcp_fields_to_host(struct tcphdr *);
static void inline	hhook_run_tcp_est_in(struct tcpcb *tp,
			    struct tcphdr *th, struct tcpopt *to);
#ifdef TCP_SIGNATURE
static void inline	tcp_fields_to_net(struct tcphdr *);
static int inline	tcp_signature_verify_input(struct mbuf *, int, int,
			    int, struct tcpopt *, struct tcphdr *, u_int);
#endif
/*
 * Kernel module interface for updating tcpstat.  The argument is an index
 * into tcpstat treated as an array of u_long.  While this encodes the
 * general layout of tcpstat into the caller, it doesn't encode its location,
 * so that future changes to add, for example, per-CPU stats support won't
 * cause binary compatibility problems for kernel modules.
 */
void
kmod_tcpstat_inc(int statnum)
{

	(*((u_long *)&V_tcpstat + statnum))++;
}
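
/*
 * Illustrative use from a module (a sketch, not part of the original
 * source): the index is a field's offset in struct tcpstat scaled to
 * u_longs, e.g.
 *
 *	kmod_tcpstat_inc(offsetof(struct tcpstat, tcps_rcvtotal) /
 *	    sizeof(u_long));
 */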
/*
 * Wrapper for the TCP established input helper hook.
 */
static void inline
hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
		    tp->osd);
	}
}
/*
 * CC wrapper hook functions
 */
static void inline
cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
	if (tp->snd_cwnd == min(tp->snd_cwnd, tp->snd_wnd))
		tp->ccv->flags |= CCF_CWND_LIMITED;
	else
		tp->ccv->flags &= ~CCF_CWND_LIMITED;

	if (type == CC_ACK) {
		if (tp->snd_cwnd > tp->snd_ssthresh) {
			tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
			    V_tcp_abc_l_var * tp->t_maxseg);
			if (tp->t_bytes_acked >= tp->snd_cwnd) {
				tp->t_bytes_acked -= tp->snd_cwnd;
				tp->ccv->flags |= CCF_ABC_SENTAWND;
			}
		} else {
			tp->ccv->flags &= ~CCF_ABC_SENTAWND;
			tp->t_bytes_acked = 0;
		}
	}

	if (CC_ALGO(tp)->ack_received != NULL) {
		/* XXXLAS: Find a way to live without this */
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->ack_received(tp->ccv, type);
	}
}
static void inline
cc_conn_init(struct tcpcb *tp)
{
	struct hc_metrics_lite metrics;
	struct inpcb *inp = tp->t_inpcb;
	int rtt;
#ifdef INET6
	int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
#endif

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tcp_hc_get(&inp->inp_inc, &metrics);

	if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
		tp->t_srtt = rtt;
		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
		TCPSTAT_INC(tcps_usedrtt);
		if (metrics.rmx_rttvar) {
			tp->t_rttvar = metrics.rmx_rttvar;
			TCPSTAT_INC(tcps_usedrttvar);
		} else {
			/* default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
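	/*
	 * Net effect of the block above: seed srtt/rttvar from the host
	 * cache and clamp the initial retransmit timeout to the range
	 * [t_rttmin, TCPTV_REXMTMAX].
	 */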
	if (metrics.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * tp->t_maxseg, metrics.rmx_ssthresh);
		TCPSTAT_INC(tcps_usedssthresh);
	}
	/*
	 * Set the slow-start flight size depending on whether this
	 * is a local network or not.
	 *
	 * Extend this so we cache the cwnd too and retrieve it here.
	 * Make cwnd even bigger than RFC3390 suggests but only if we
	 * have previous experience with the remote host.  Be careful
	 * not to make cwnd bigger than the remote receive window or our
	 * own send socket buffer.  Maybe put some additional upper bound
	 * on the retrieved cwnd.  Should do incremental updates to
	 * hostcache when cwnd collapses so the next connection doesn't
	 * overload the path again.
	 *
	 * XXXAO: Initializing the CWND from the hostcache is broken
	 * and in its current form not RFC conformant.  It is disabled
	 * until fixed or removed entirely.
	 *
	 * RFC3390 says only do this if the SYN or SYN/ACK didn't get lost.
	 * We currently check only in syncache_socket for that.
	 */
	/* #define TCP_METRICS_CWND */
#ifdef TCP_METRICS_CWND
	if (metrics.rmx_cwnd)
		tp->snd_cwnd = max(tp->t_maxseg, min(metrics.rmx_cwnd / 2,
		    min(tp->snd_wnd, so->so_snd.sb_hiwat)));
	else
#endif
	if (V_tcp_do_rfc3390)
		tp->snd_cwnd = min(4 * tp->t_maxseg,
		    max(2 * tp->t_maxseg, 4380));
#ifdef INET6
	else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) ||
	    (!isipv6 && in_localaddr(inp->inp_faddr)))
#else
	else if (in_localaddr(inp->inp_faddr))
#endif
		tp->snd_cwnd = tp->t_maxseg * V_ss_fltsz_local;
	else
		tp->snd_cwnd = tp->t_maxseg * V_ss_fltsz;

	if (CC_ALGO(tp)->conn_init != NULL)
		CC_ALGO(tp)->conn_init(tp->ccv);
}
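
/*
 * Worked example for the RFC 3390 branch above: with a 1460-byte MSS the
 * initial window is min(4 * 1460, max(2 * 1460, 4380)) = 4380 bytes,
 * i.e. three segments.
 */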
void inline
cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	switch(type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(tp->t_flags)) {
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_ECN:
		if (!IN_CONGRECOVERY(tp->t_flags)) {
			TCPSTAT_INC(tcps_ecn_rcwnd);
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_RTO:
		tp->t_bytes_acked = 0;
		EXIT_RECOVERY(tp->t_flags);
		/*
		 * Halve the lesser of cwnd and the send window, rounded
		 * down to whole segments, but to no less than 2 segments.
		 */
		tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
		    tp->t_maxseg) * tp->t_maxseg;
		tp->snd_cwnd = tp->t_maxseg;
		break;
	case CC_RTO_ERR:
		TCPSTAT_INC(tcps_sndrexmitbad);
		/* RTO was unnecessary, so reset everything. */
		tp->snd_cwnd = tp->snd_cwnd_prev;
		tp->snd_ssthresh = tp->snd_ssthresh_prev;
		tp->snd_recover = tp->snd_recover_prev;
		if (tp->t_flags & TF_WASFRECOVERY)
			ENTER_FASTRECOVERY(tp->t_flags);
		if (tp->t_flags & TF_WASCRECOVERY)
			ENTER_CONGRECOVERY(tp->t_flags);
		tp->snd_nxt = tp->snd_max;
		tp->t_flags &= ~TF_PREVVALID;
		break;
	}

	if (CC_ALGO(tp)->cong_signal != NULL) {
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->cong_signal(tp->ccv, type);
	}
}
static void inline
cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	/* XXXLAS: KASSERT that we're in recovery? */

	if (CC_ALGO(tp)->post_recovery != NULL) {
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->post_recovery(tp->ccv);
	}
	/* XXXLAS: EXIT_RECOVERY ? */

	tp->t_bytes_acked = 0;
}
static void inline
tcp_fields_to_host(struct tcphdr *th)
{

	th->th_seq = ntohl(th->th_seq);
	th->th_ack = ntohl(th->th_ack);
	th->th_win = ntohs(th->th_win);
	th->th_urp = ntohs(th->th_urp);
}

#ifdef TCP_SIGNATURE
static void inline
tcp_fields_to_net(struct tcphdr *th)
{

	th->th_seq = htonl(th->th_seq);
	th->th_ack = htonl(th->th_ack);
	th->th_win = htons(th->th_win);
	th->th_urp = htons(th->th_urp);
}
static int inline
tcp_signature_verify_input(struct mbuf *m, int off0, int tlen, int optlen,
    struct tcpopt *to, struct tcphdr *th, u_int tcpbflag)
{
	int ret;

	tcp_fields_to_net(th);
	ret = tcp_signature_verify(m, off0, tlen, optlen, to, th, tcpbflag);
	tcp_fields_to_host(th);
	return (ret);
}
#endif /* TCP_SIGNATURE */
/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define ND6_HINT(tp) \
do { \
	if ((tp) && (tp)->t_inpcb && \
	    ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
		nd6_nud_hint(NULL, NULL, 0); \
} while (0)
#else
#define ND6_HINT(tp)
#endif
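/*
 * (The do { } while (0) wrapper keeps the macro a single statement so it
 * nests safely under if/else; the IPv4-only build gets an empty definition
 * because ND6_HINT() is invoked unconditionally below.)
 */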
/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 *	- there is no delayed ack timer in progress and
 *	- our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window and
 *		- delayed acks are enabled or
 *		- this is a half-synchronized T/TCP connection.
 */
#define DELAY_ACK(tp)							\
	((!tcp_timer_active(tp, TT_DELACK) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
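/*
 * Typical use, as in the fast path later in this file:
 *
 *	if (DELAY_ACK(tp))
 *		tp->t_flags |= TF_DELACK;
 *	else
 *		tp->t_flags |= TF_ACKNOW;
 */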
/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ia6 = ip6_getdstifaddr(m);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ifa_free(&ia6->ia_ifa);
		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		return (IPPROTO_DONE);
	}

	tcp_input(m, *offp);
	return (IPPROTO_DONE);
}
#endif /* INET6 */
void
tcp_input(struct mbuf *m, int off0)
{
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ipovly *ipov;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int optlen = 0;
	int len, tlen = 0, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
	uint8_t iptos;
#ifdef TCP_SIGNATURE
	uint8_t sig_checked = 0;
#endif
#ifdef IPFIREWALL_FORWARD
	struct m_tag *fwd_tag;
#endif
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
	const int isipv6 = 0;
#endif
	struct tcpopt to;		/* options in this segment */
	char *s = NULL;			/* address and port logging */
	int ti_locked;
#define	TI_UNLOCKED	1
#define	TI_RLOCKED	2
#define	TI_WLOCKED	3

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	to.to_flags = 0;
	TCPSTAT_INC(tcps_rcvtotal);
	if (isipv6) {
#ifdef INET6
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */
		ip6 = mtod(m, struct ip6_hdr *);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}
		th = (struct tcphdr *)((caddr_t)ip6 + off0);

		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbounded/unconnected pcb,
		 * unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with unspecified IPv6 destination are
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			goto drop;
		}
#else
		th = NULL;		/* XXX: Avoid compiler warning. */
#endif
	} else {
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m, (struct mbuf *)0);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return;
			}
		}
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ip->ip_len;

		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data +
					ip->ip_len +
					IPPROTO_TCP));
			th->th_sum ^= 0xffff;
#ifdef TCPDEBUG
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
#endif
		} else {
			/*
			 * Checksum extended TCP header and data.
			 */
			len = sizeof (struct ip) + tlen;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
			th->th_sum = in_cksum(m, len);
		}
		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}

		/* Re-initialization for later version check */
		ip->ip_v = IPVERSION;
	}

#ifdef INET6
	if (isipv6)
		iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
	else
#endif
		iptos = ip->ip_tos;
	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		TCPSTAT_INC(tcps_rcvbadoff);
		goto drop;
	}
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
		if (isipv6) {
#ifdef INET6
			IP6_EXTHDR_CHECK(m, off0, off, );
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
#endif
		} else {
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					return;
				}
				ip = mtod(m, struct ip *);
				ipov = (struct ipovly *)ip;
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	tcp_fields_to_host(th);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;
	/*
	 * Locate pcb for segment, which requires a lock on tcbinfo.
	 * Optimistically acquire a global read lock rather than a write lock
	 * unless header flags necessarily imply a state change.  There are
	 * two cases where we might discover later we need a write lock
	 * despite the flags: ACKs moving a connection out of the syncache,
	 * and ACKs for a connection in TIMEWAIT.
	 */
	if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
	    tcp_read_locking == 0) {
		INP_INFO_WLOCK(&V_tcbinfo);
		ti_locked = TI_WLOCKED;
	} else {
		INP_INFO_RLOCK(&V_tcbinfo);
		ti_locked = TI_RLOCKED;
	}

findpcb:
#ifdef INVARIANTS
	if (ti_locked == TI_RLOCKED)
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	else if (ti_locked == TI_WLOCKED)
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	else
		panic("%s: findpcb ti_locked %d\n", __func__, ti_locked);
#endif
#ifdef IPFIREWALL_FORWARD
	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

	if (fwd_tag != NULL && isipv6 == 0) {	/* IPv6 support is not yet */
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag+1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * already got one like this?
		 */
		inp = in_pcblookup_hash(&V_tcbinfo,
		    ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport,
		    0, m->m_pkthdr.rcvif);
		if (!inp) {
			/* It's new.  Try to find the ambushing socket. */
			inp = in_pcblookup_hash(&V_tcbinfo,
			    ip->ip_src, th->th_sport,
			    next_hop->sin_addr,
			    next_hop->sin_port ?
				ntohs(next_hop->sin_port) :
				th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
		}
		/* Remove the tag from the packet.  We don't need it anymore. */
		m_tag_delete(m, fwd_tag);
	} else
#endif /* IPFIREWALL_FORWARD */
	{
#ifdef INET6
		if (isipv6)
			inp = in6_pcblookup_hash(&V_tcbinfo,
			    &ip6->ip6_src, th->th_sport,
			    &ip6->ip6_dst, th->th_dport,
			    INPLOOKUP_WILDCARD, m->m_pkthdr.rcvif);
		else
#endif
			inp = in_pcblookup_hash(&V_tcbinfo,
			    ip->ip_src, th->th_sport,
			    ip->ip_dst, th->th_dport,
			    INPLOOKUP_WILDCARD, m->m_pkthdr.rcvif);
	}
	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 * XXX MRT Send RST using which routing table?
	 */
	if (inp == NULL) {
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    tcp_log_in_vain == 2) {
			if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
				log(LOG_INFO, "%s; %s: Connection attempt "
				    "to closed port\n", s, __func__);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
		    V_blackhole == 2)
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_WLOCK(inp);
	if (!(inp->inp_flags & INP_HW_FLOWID)
	    && (m->m_flags & M_FLOWID)
	    && ((inp->inp_socket == NULL)
		|| !(inp->inp_socket->so_options & SO_ACCEPTCONN))) {
		inp->inp_flags |= INP_HW_FLOWID;
		inp->inp_flags &= ~INP_SW_FLOWID;
		inp->inp_flowid = m->m_pkthdr.flowid;
	}
#ifdef IPSEC
#ifdef INET6
	if (isipv6 && ipsec6_in_reject(m, inp)) {
		V_ipsec6stat.in_polvio++;
		goto dropunlock;
	} else
#endif /* INET6 */
	if (ipsec4_in_reject(m, inp) != 0) {
		V_ipsec4stat.in_polvio++;
		goto dropunlock;
	}
#endif /* IPSEC */

	/*
	 * Check the minimum TTL for socket.
	 */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
			goto dropunlock;
		else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}
	/*
	 * A previous connection in TIMEWAIT state is supposed to catch stray
	 * or duplicate segments arriving late.  If this segment was a
	 * legitimate new connection attempt the old INPCB gets removed and
	 * we can try again to find a listening socket.
	 *
	 * At this point, due to earlier optimism, we may hold a read lock on
	 * the inpcbinfo, rather than a write lock.  If so, we need to
	 * upgrade, or if that fails, acquire a reference on the inpcb, drop
	 * all locks, acquire a global write lock, and then re-acquire the
	 * inpcb lock.  We may at that point discover that another thread has
	 * tried to free the inpcb, in which case we need to loop back and
	 * try to find a new inpcb to deliver to.
	 */
relocked:
	if (inp->inp_flags & INP_TIMEWAIT) {
		KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED,
		    ("%s: INP_TIMEWAIT ti_locked %d", __func__, ti_locked));

		if (ti_locked == TI_RLOCKED) {
			if (INP_INFO_TRY_UPGRADE(&V_tcbinfo) == 0) {
				in_pcbref(inp);	/* Reference for reacquire. */
				INP_WUNLOCK(inp);
				INP_INFO_RUNLOCK(&V_tcbinfo);
				INP_INFO_WLOCK(&V_tcbinfo);
				ti_locked = TI_WLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele(inp)) {
					inp = NULL;
					goto findpcb;
				}
			} else
				ti_locked = TI_WLOCKED;
		}
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
#ifdef TCP_SIGNATURE
		tcp_dooptions(&to, optp, optlen,
		    (thflags & TH_SYN) ? TO_SYN : 0);
		if (sig_checked == 0) {
			tp = intotcpcb(inp);
			if (tp == NULL || tp->t_state == TCPS_CLOSED) {
				rstreason = BANDLIM_RST_CLOSEDPORT;
				goto dropwithreset;
			}
			if (!tcp_signature_verify_input(m, off0, tlen, optlen,
			    &to, th, tp->t_flags))
				goto dropunlock;
			sig_checked = 1;
		}
#else
		if (thflags & TH_SYN)
			tcp_dooptions(&to, optp, optlen, TO_SYN);
#endif
		/*
		 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
		 */
		if (tcp_twcheck(inp, &to, th, m, tlen))
			goto findpcb;
		INP_INFO_WUNLOCK(&V_tcbinfo);
		return;
	}
	/*
	 * The TCPCB may no longer exist if the connection is winding
	 * down or it is in the CLOSED state.  Either way we drop the
	 * segment and send an appropriate response.
	 */
	tp = intotcpcb(inp);
	if (tp == NULL || tp->t_state == TCPS_CLOSED) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	/*
	 * We've identified a valid inpcb, but it could be that we need an
	 * inpcbinfo write lock and have only a read lock.  In this case,
	 * attempt to upgrade/relock using the same strategy as the TIMEWAIT
	 * case above.  If we relock, we have to jump back to 'relocked' as
	 * the connection might now be in TIMEWAIT.
	 */
	if (tp->t_state != TCPS_ESTABLISHED ||
	    (thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
	    tcp_read_locking == 0) {
		KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED,
		    ("%s: upgrade check ti_locked %d", __func__, ti_locked));

		if (ti_locked == TI_RLOCKED) {
			if (INP_INFO_TRY_UPGRADE(&V_tcbinfo) == 0) {
				in_pcbref(inp);	/* Reference for reacquire. */
				INP_WUNLOCK(inp);
				INP_INFO_RUNLOCK(&V_tcbinfo);
				INP_INFO_WLOCK(&V_tcbinfo);
				ti_locked = TI_WLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele(inp)) {
					inp = NULL;
					goto findpcb;
				}
				goto relocked;
			} else
				ti_locked = TI_WLOCKED;
		}
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	}
	INP_WLOCK_ASSERT(inp);
#ifdef MAC
	if (mac_inpcb_check_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
#ifdef INET6
		if (isipv6)
			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
		else
#endif
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif /* TCPDEBUG */
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.
	 */
	if (so->so_options & SO_ACCEPTCONN) {
		struct in_conninfo inc;

		KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
		    "tp not listening", __func__));
		bzero(&inc, sizeof(inc));
#ifdef INET6
		if (isipv6) {
			inc.inc_flags |= INC_ISIPV6;
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;
		inc.inc_fibnum = so->so_fibnum;
		/*
		 * Check for an existing connection attempt in syncache if
		 * the flag is only ACK.  A successful lookup creates a new
		 * socket appended to the listen queue in SYN_RECEIVED state.
		 */
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
			/*
			 * Parse the TCP options here because
			 * syncookies need access to the reflected
			 * timestamp.
			 */
			tcp_dooptions(&to, optp, optlen, 0);
			/*
			 * NB: syncache_expand() doesn't unlock
			 * inp and tcpinfo locks.
			 */
			if (!syncache_expand(&inc, &to, th, &so, m)) {
				/*
				 * No syncache entry or ACK was not
				 * for our SYN/ACK.  Send a RST.
				 * NB: syncache did its own logging
				 * of the failure cause.
				 */
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (so == NULL) {
				/*
				 * We completed the 3-way handshake
				 * but could not allocate a socket
				 * either due to memory shortage,
				 * listen queue length limits or
				 * global socket limits.  Send RST
				 * or wait and have the remote end
				 * retransmit the ACK for another
				 * try.
				 */
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Socket allocation failed due to "
					    "limits or memory shortage, %s\n",
					    s, __func__,
					    V_tcp_sc_rst_sock_fail ?
					    "sending RST" : "try again");
				if (V_tcp_sc_rst_sock_fail) {
					rstreason = BANDLIM_UNLIMITED;
					goto dropwithreset;
				} else
					goto dropunlock;
			}
			/*
			 * Socket is created in state SYN_RECEIVED.
			 * Unlock the listen socket, lock the newly
			 * created socket and update the tp variable.
			 */
			INP_WUNLOCK(inp);	/* listen socket */
			inp = sotoinpcb(so);
			INP_WLOCK(inp);		/* new connection */
			tp = intotcpcb(inp);
			KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
			    ("%s: ", __func__));
#ifdef TCP_SIGNATURE
			if (sig_checked == 0) {
				tcp_dooptions(&to, optp, optlen,
				    (thflags & TH_SYN) ? TO_SYN : 0);
				if (!tcp_signature_verify_input(m, off0, tlen,
				    optlen, &to, th, tp->t_flags)) {

					/*
					 * In SYN_SENT state if it receives an
					 * RST, it is allowed for further
					 * processing.
					 */
					if ((thflags & TH_RST) == 0 ||
					    (tp->t_state == TCPS_SYN_SENT) == 0)
						goto dropunlock;
				}
				sig_checked = 1;
			}
#endif

			/*
			 * Process the segment and the data it
			 * contains.  tcp_do_segment() consumes
			 * the mbuf chain and unlocks the inpcb.
			 */
			tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
			    iptos, ti_locked);
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
			return;
		}
		/*
		 * Segment flag validation for new connection attempts:
		 *
		 * Our (SYN|ACK) response was rejected.
		 * Check with syncache and remove entry to prevent
		 * retransmits.
		 *
		 * NB: syncache_chkrst does its own logging of failure
		 * causes.
		 */
		if (thflags & TH_RST) {
			syncache_chkrst(&inc, th);
			goto dropunlock;
		}
		/*
		 * We can't do anything without SYN.
		 */
		if ((thflags & TH_SYN) == 0) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN is missing, segment ignored\n",
				    s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * (SYN|ACK) is bogus on a listen socket.
		 */
		if (thflags & TH_ACK) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|ACK invalid, segment rejected\n",
				    s, __func__);
			syncache_badack(&inc);	/* XXX: Not needed! */
			TCPSTAT_INC(tcps_badsyn);
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		/*
		 * If the drop_synfin option is enabled, drop all
		 * segments with both the SYN and FIN bits set.
		 * This prevents e.g. nmap from identifying the
		 * TCP/IP stack.
		 *
		 * XXX: Poor reasoning.  nmap has other methods
		 * and is constantly refining its stack detection
		 * strategies.
		 * XXX: This is a violation of the TCP specification
		 * and was used by RFC1644.
		 */
		if ((thflags & TH_FIN) && V_drop_synfin) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|FIN segment ignored (based on "
				    "sysctl setting)\n", s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 *
		 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
		 * as they do not affect the state of the TCP FSM.
		 * The data pointed to by TH_URG and th_urp is ignored.
		 */
		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
		KASSERT(thflags & (TH_SYN),
		    ("%s: Listen socket: TH_SYN not set", __func__));
#ifdef INET6
		/*
		 * If deprecated address is forbidden,
		 * we do not accept SYN to deprecated interface
		 * address to prevent any new inbound connection from
		 * getting established.
		 * When we do not accept SYN, we send a TCP RST,
		 * with deprecated source address (instead of dropping
		 * it).  We compromise because it is much better for the
		 * peer to send a RST, and the RST will be the final packet
		 * for the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it says like this:
		 * 1. use of deprecated addr with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple description texts for deprecated address
		 * handling - worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !V_ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			ia6 = ip6_getdstifaddr(m);
			if (ia6 != NULL &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				ifa_free(&ia6->ia_ifa);
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to deprecated "
					"IPv6 address rejected\n",
					s, __func__);
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (ia6)
				ifa_free(&ia6->ia_ifa);
		}
#endif /* INET6 */
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *   Don't respond if the destination is a link layer
		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
		 *   If it is from this socket it must be forged.
		 *   Don't respond if the source or destination is a
		 *	global or subnet broad- or multicast address.
		 *   Note that it is quite possible to receive unicast
		 *	link-layer packets with a broadcast IP address. Use
		 *	in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST)) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
			    log(LOG_DEBUG, "%s; %s: Listen socket: "
				"Connection attempt from broad- or multicast "
				"link layer address ignored\n", s, __func__);
			goto dropunlock;
		}
#ifdef INET6
		if (isipv6) {
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to/from self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to multicast "
					"address ignored\n", s, __func__);
				goto dropunlock;
			}
		} else
#endif
		{
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to broad- "
					"or multicast address ignored\n",
					s, __func__);
				goto dropunlock;
			}
		}
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
#ifdef TCPDEBUG
		if (so->so_options & SO_DEBUG)
			tcp_trace(TA_INPUT, ostate, tp,
			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		tcp_dooptions(&to, optp, optlen, TO_SYN);
		syncache_add(&inc, &to, th, inp, &so, m);
		/*
		 * Entry added to syncache and mbuf consumed.
		 * Everything already unlocked by syncache_add().
		 */
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		return;
	}
#ifdef TCP_SIGNATURE
	if (sig_checked == 0) {
		tcp_dooptions(&to, optp, optlen,
		    (thflags & TH_SYN) ? TO_SYN : 0);
		if (!tcp_signature_verify_input(m, off0, tlen, optlen, &to,
		    th, tp->t_flags)) {

			/*
			 * In SYN_SENT state if it receives an RST, it is
			 * allowed for further processing.
			 */
			if ((thflags & TH_RST) == 0 ||
			    (tp->t_state == TCPS_SYN_SENT) == 0)
				goto dropunlock;
		}
		sig_checked = 1;
	}
#endif

	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
	 * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
	 * the inpcb, and unlocks pcbinfo.
	 */
	tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked);
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	return;
dropwithreset:
	if (ti_locked == TI_RLOCKED)
		INP_INFO_RUNLOCK(&V_tcbinfo);
	else if (ti_locked == TI_WLOCKED)
		INP_INFO_WUNLOCK(&V_tcbinfo);
	else
		panic("%s: dropwithreset ti_locked %d", __func__, ti_locked);
	ti_locked = TI_UNLOCKED;

	if (inp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(inp);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
	goto drop;

dropunlock:
	if (ti_locked == TI_RLOCKED)
		INP_INFO_RUNLOCK(&V_tcbinfo);
	else if (ti_locked == TI_WLOCKED)
		INP_INFO_WUNLOCK(&V_tcbinfo);
	else
		panic("%s: dropunlock ti_locked %d", __func__, ti_locked);
	ti_locked = TI_UNLOCKED;

	if (inp != NULL)
		INP_WUNLOCK(inp);

drop:
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
}
static void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos,
    int ti_locked)
{
	int thflags, acked, ourfinisacked, needoutput = 0;
	int rstreason, todrop, win;
	u_long tiwin;
	struct tcpopt to;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;
	tp->sackhint.last_sack_ack = 0;

	/*
	 * If this is either a state-changing packet or current state isn't
	 * established, we require a write lock on tcbinfo.  Otherwise, we
	 * allow either a read lock or a write lock, as we may have acquired
	 * a write lock due to a race.
	 *
	 * Require a global write lock for SYN/FIN/RST segments or
	 * non-established connections; otherwise accept either a read or
	 * write lock, as we may have conservatively acquired a write lock in
	 * certain cases in tcp_input() (is this still true?).  Currently we
	 * will never enter with no lock, so we try to drop it quickly in the
	 * common pure ack/pure data cases.
	 */
	if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
	    tp->t_state != TCPS_ESTABLISHED) {
		KASSERT(ti_locked == TI_WLOCKED, ("%s ti_locked %d for "
		    "SYN/FIN/RST/!EST", __func__, ti_locked));
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	} else {
#ifdef INVARIANTS
		if (ti_locked == TI_RLOCKED)
			INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
		else if (ti_locked == TI_WLOCKED)
			INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
		else
			panic("%s: ti_locked %d for EST", __func__,
			    ti_locked);
#endif
	}
	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	tp->t_rcvtime = ticks;
	if (TCPS_HAVEESTABLISHED(tp->t_state))
		tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
	/*
	 * Unscale the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;

	/*
	 * TCP ECN processing.
	 */
	if (tp->t_flags & TF_ECN_PERMIT) {
		if (thflags & TH_CWR)
			tp->t_flags &= ~TF_ECN_SND_ECE;
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->t_flags |= TF_ECN_SND_ECE;
			TCPSTAT_INC(tcps_ecn_ce);
			break;
		case IPTOS_ECN_ECT0:
			TCPSTAT_INC(tcps_ecn_ect0);
			break;
		case IPTOS_ECN_ECT1:
			TCPSTAT_INC(tcps_ecn_ect1);
			break;
		}
		/* Congestion experienced. */
		if (thflags & TH_ECE) {
			cc_cong_signal(tp, th, CC_ECN);
		}
	}
	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);

	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, ticks))
			to.to_tsecr = 0;
	}

	/*
	 * Process options only when we get SYN/ACK back. The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
		}
		/*
		 * Initial send window.  It will be updated with
		 * the next incoming segment to the scaled value.
		 */
		tp->snd_wnd = th->th_win;
		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = ticks;
		}
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    (to.to_flags & TOF_SACKPERM) == 0)
			tp->t_flags &= ~TF_SACK_PERMIT;
	}
	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED first, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    th->th_seq == tp->rcv_nxt &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    tp->snd_nxt == tp->snd_max &&
	    tiwin && tiwin == tp->snd_wnd &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    LIST_EMPTY(&tp->t_segq) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = ticks;
			tp->ts_recent = to.to_tsval;
		}
		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    !IN_RECOVERY(tp->t_flags) &&
			    (to.to_flags & TOF_SACK) == 0 &&
			    TAILQ_EMPTY(&tp->snd_holes)) {
				/*
				 * This is a pure ack for outstanding data.
				 */
				if (ti_locked == TI_RLOCKED)
					INP_INFO_RUNLOCK(&V_tcbinfo);
				else if (ti_locked == TI_WLOCKED)
					INP_INFO_WUNLOCK(&V_tcbinfo);
				else
					panic("%s: ti_locked %d on pure ACK",
					    __func__, ti_locked);
				ti_locked = TI_UNLOCKED;

				TCPSTAT_INC(tcps_predack);

				/*
				 * "bad retransmit" recovery.
				 */
				if (tp->t_rxtshift == 1 &&
				    tp->t_flags & TF_PREVVALID &&
				    (int)(ticks - tp->t_badrxtwin) < 0) {
					cc_cong_signal(tp, th, CC_RTO_ERR);
				}

				/*
				 * Recalculate the transmit timer / rtt.
				 *
				 * Some boxes send broken timestamp replies
				 * during the SYN+ACK phase, ignore
				 * timestamps of 0 or we could calculate a
				 * huge RTT and blow up the retransmit timer.
				 */
				if ((to.to_flags & TOF_TS) != 0 &&
				    to.to_tsecr) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - to.to_tsecr)
						tp->t_rttlow = ticks - to.to_tsecr;
					tcp_xmit_timer(tp,
					    ticks - to.to_tsecr + 1);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - tp->t_rtttime)
						tp->t_rttlow = ticks - tp->t_rtttime;
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime);
				}
				tcp_xmit_bandwidth_limit(tp, th->th_ack);
				acked = BYTES_THIS_ACK(tp, th);

				/* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
				hhook_run_tcp_est_in(tp, th, &to);

				TCPSTAT_INC(tcps_rcvackpack);
				TCPSTAT_ADD(tcps_rcvackbyte, acked);
				sbdrop(&so->so_snd, acked);
				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;

				/*
				 * Let the congestion control algorithm update
				 * congestion control related information. This
				 * typically means increasing the congestion
				 * window.
				 */
				cc_ack_received(tp, th, CC_ACK);

				tp->snd_una = th->th_ack;
				/*
				 * Pull snd_wl2 up to prevent seq wrap relative
				 * to th_ack.
				 */
				tp->snd_wl2 = th->th_ack;
				tp->t_dupacks = 0;
				m_freem(m);
				ND6_HINT(tp); /* Some progress has been made. */

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
#ifdef TCPDEBUG
				if (so->so_options & SO_DEBUG)
					tcp_trace(TA_INPUT, ostate, tp,
					    (void *)tcp_saveipgen,
					    &tcp_savetcp, 0);
#endif
				if (tp->snd_una == tp->snd_max)
					tcp_timer_activate(tp, TT_REXMT, 0);
				else if (!tcp_timer_active(tp, TT_PERSIST))
					tcp_timer_activate(tp, TT_REXMT,
					    tp->t_rxtcur);
				sowwakeup(so);
				if (so->so_snd.sb_cc)
					(void) tcp_output(tp);
				goto check_delack;
			}
		} else if (th->th_ack == tp->snd_una &&
		    tlen <= sbspace(&so->so_rcv)) {
			int newsize = 0;	/* automatic sockbuf scaling */

			/*
			 * This is a pure, in-sequence data packet with
			 * nothing on the reassembly queue and we have enough
			 * buffer space to take it.
			 */
			if (ti_locked == TI_RLOCKED)
				INP_INFO_RUNLOCK(&V_tcbinfo);
			else if (ti_locked == TI_WLOCKED)
				INP_INFO_WUNLOCK(&V_tcbinfo);
			else
				panic("%s: ti_locked %d on pure data "
				    "segment", __func__, ti_locked);
			ti_locked = TI_UNLOCKED;

			/* Clean receiver SACK report if present */
			if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			TCPSTAT_INC(tcps_preddat);
			tp->rcv_nxt += tlen;
			/*
			 * Pull snd_wl1 up to prevent seq wrap relative to
			 * th_seq.
			 */
			tp->snd_wl1 = th->th_seq;
			/*
			 * Pull rcv_up up to prevent seq wrap relative to
			 * rcv_nxt.
			 */
			tp->rcv_up = tp->rcv_nxt;
			TCPSTAT_INC(tcps_rcvpack);
			TCPSTAT_ADD(tcps_rcvbyte, tlen);
			ND6_HINT(tp);	/* Some progress has been made */
#ifdef TCPDEBUG
			if (so->so_options & SO_DEBUG)
				tcp_trace(TA_INPUT, ostate, tp,
				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		/*
		 * Automatic sizing of receive socket buffer.  Often the send
		 * buffer size is not optimally adjusted to the actual network
		 * conditions at hand (delay bandwidth product).  Setting the
		 * buffer size too small limits throughput on links with high
		 * bandwidth and high delay (eg. trans-continental/oceanic links).
		 *
		 * On the receive side the socket buffer memory is only rarely
		 * used to any significant extent.  This allows us to be much
		 * more aggressive in scaling the receive socket buffer.  For
		 * the case that the buffer space is actually used to a large
		 * extent and we run out of kernel memory we can simply drop
		 * the new segments; TCP on the sender will just retransmit it
		 * later.  Setting the buffer size too big may only consume too
		 * much kernel memory if the application doesn't read() from
		 * the socket or packet loss or reordering makes use of the
		 * reassembly queue necessary.
		 *
		 * The criteria to step up the receive buffer one notch are:
		 *  1. the number of bytes received during the time it takes
		 *     one timestamp to be reflected back to us (the RTT);
		 *  2. received bytes per RTT is within seven eighth of the
		 *     current socket buffer size;
		 *  3. receive buffer size has not hit maximal automatic size;
		 *
		 * This algorithm does one step per RTT at most and only if
		 * we receive a bulk stream w/o packet losses or reorderings.
		 * Shrinking the buffer during idle times is not necessary as
		 * it doesn't consume any memory when idle.
		 *
		 * TODO: Only step up if the application is actually serving
		 * the buffer to better manage the socket buffer resources.
		 */
			if (V_tcp_do_autorcvbuf &&
			    to.to_tsecr &&
			    (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
				if (TSTMP_GT(to.to_tsecr, tp->rfbuf_ts) &&
				    to.to_tsecr - tp->rfbuf_ts < hz) {
					if (tp->rfbuf_cnt >
					    (so->so_rcv.sb_hiwat / 8 * 7) &&
					    so->so_rcv.sb_hiwat <
					    V_tcp_autorcvbuf_max) {
						newsize =
						    min(so->so_rcv.sb_hiwat +
						    V_tcp_autorcvbuf_inc,
						    V_tcp_autorcvbuf_max);
					}
					/* Start over with next RTT. */
					tp->rfbuf_ts = 0;
					tp->rfbuf_cnt = 0;
				} else
					tp->rfbuf_cnt += tlen;	/* add up */
			}

			/* Add data to socket buffer. */
			SOCKBUF_LOCK(&so->so_rcv);
			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
				m_freem(m);
			} else {
				/*
				 * Set new socket buffer size.
				 * Give up when limit is reached.
				 */
				if (newsize)
					if (!sbreserve_locked(&so->so_rcv,
					    newsize, so, NULL))
						so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
				m_adj(m, drop_hdrlen);	/* delayed header drop */
				sbappendstream_locked(&so->so_rcv, m);
			}
			/* NB: sorwakeup_locked() does an implicit unlock. */
			sorwakeup_locked(so);
			if (DELAY_ACK(tp)) {
				tp->t_flags |= TF_DELACK;
			} else {
				tp->t_flags |= TF_ACKNOW;
				tcp_output(tp);
			}
			goto check_delack;
		}
	}
	/*
	 * Calculate amount of space in receive window,
	 * and then do TCP input processing.
	 * Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	win = sbspace(&so->so_rcv);
	if (win < 0)
		win = 0;
	tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
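	/*
	 * I.e., never offer less than what was already advertised:
	 * rcv_adv - rcv_nxt is the part of the last advertised window
	 * the peer may still legitimately fill.
	 */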
	/* Reset receive buffer auto scaling when not in bulk receive mode. */
	tp->rfbuf_ts = 0;
	tp->rfbuf_cnt = 0;

	switch (tp->t_state) {
	/*
	 * If the state is SYN_RECEIVED:
	 *	if seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	case TCPS_SYN_RECEIVED:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		break;
	/*
	 * If the state is SYN_SENT:
	 *	if seg contains an ACK, but not for our SYN, drop the input.
	 *	if seg contains a RST, then drop the connection.
	 *	if seg does not contain SYN, then drop it.
	 * Otherwise this is an acceptable SYN segment
	 *	initialize tp->rcv_nxt and tp->irs
	 *	if seg contains ack then advance tp->snd_una
	 *	if seg contains an ECE and ECN support is enabled, the stream
	 *	    is ECN capable.
	 *	if SYN has been acked change to ESTABLISHED else SYN_RCVD state
	 *	arrange for segment to be acked (eventually)
	 *	continue processing rest of data/controls, beginning with URG
	 */
	case TCPS_SYN_SENT:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->iss) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_UNLIMITED;
			goto dropwithreset;
		}
		if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST))
			tp = tcp_drop(tp, ECONNREFUSED);
		if (thflags & TH_RST)
			goto drop;
		if (!(thflags & TH_SYN))
			goto drop;

		tp->irs = th->th_seq;
		tcp_rcvseqinit(tp);
		if (thflags & TH_ACK) {
			TCPSTAT_INC(tcps_connects);
			soisconnected(so);
#ifdef MAC
			mac_socketpeer_set_from_mbuf(m, so);
#endif
			/* Do window scaling on this connection? */
			if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
			    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
				tp->rcv_scale = tp->request_r_scale;
			}
			tp->rcv_adv += imin(tp->rcv_wnd,
			    TCP_MAXWIN << tp->rcv_scale);
			tp->snd_una++;		/* SYN is acked */
			/*
			 * If there's data, delay ACK; if there's also a FIN
			 * ACKNOW will be turned on later.
			 */
			if (DELAY_ACK(tp) && tlen != 0)
				tcp_timer_activate(tp, TT_DELACK,
				    tcp_delacktime);
			else
				tp->t_flags |= TF_ACKNOW;

			if ((thflags & TH_ECE) && V_tcp_do_ecn) {
				tp->t_flags |= TF_ECN_PERMIT;
				TCPSTAT_INC(tcps_ecn_shs);
			}

			/*
			 * Received <SYN,ACK> in SYN_SENT[*] state.
			 * Transitions:
			 *	SYN_SENT  --> ESTABLISHED
			 *	SYN_SENT* --> FIN_WAIT_1
			 */
			tp->t_starttime = ticks;
			if (tp->t_flags & TF_NEEDFIN) {
				tp->t_state = TCPS_FIN_WAIT_1;
				tp->t_flags &= ~TF_NEEDFIN;
				thflags &= ~TH_SYN;
			} else {
				tp->t_state = TCPS_ESTABLISHED;
				cc_conn_init(tp);
				tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
			}
		} else {
			/*
			 * Received initial SYN in SYN-SENT[*] state =>
			 * simultaneous open.  If segment contains CC option
			 * and there is a cached CC, apply TAO test.
			 * If it succeeds, connection is half-synchronized.
			 * Otherwise, do 3-way handshake:
			 *        SYN-SENT -> SYN-RECEIVED
			 *        SYN-SENT* -> SYN-RECEIVED*
			 * If there was no CC option, clear cached CC value.
			 */
			tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
			tcp_timer_activate(tp, TT_REXMT, 0);
			tp->t_state = TCPS_SYN_RECEIVED;
		}

		KASSERT(ti_locked == TI_WLOCKED, ("%s: trimthenstep6: "
		    "ti_locked %d", __func__, ti_locked));
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
		INP_WLOCK_ASSERT(tp->t_inpcb);
		/*
		 * Advance th->th_seq to correspond to first data byte.
		 * If data, trim to stay within window,
		 * dropping FIN if necessary.
		 */
		th->th_seq++;
		if (tlen > tp->rcv_wnd) {
			todrop = tlen - tp->rcv_wnd;
			m_adj(m, -todrop);
			tlen = tp->rcv_wnd;
			thflags &= ~TH_FIN;
			TCPSTAT_INC(tcps_rcvpackafterwin);
			TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
		}
		tp->snd_wl1 = th->th_seq - 1;
		tp->rcv_up = th->th_seq;
		/*
		 * Client side of transaction: already sent SYN and data.
		 * If the remote host used T/TCP to validate the SYN,
		 * our data will be ACK'd; if so, enter normal data segment
		 * processing in the middle of step 5, ack processing.
		 * Otherwise, goto step 6.
		 */
		if (thflags & TH_ACK)
			goto process_ACK;
		goto step6;
	/*
	 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
	 *	do normal processing.
	 *
	 * NB: Leftover from RFC1644 T/TCP.  Cases to be reused later.
	 */
	case TCPS_LAST_ACK:
	case TCPS_CLOSING:
		break;  /* continue normal processing */
	}
	/*
	 * States other than LISTEN or SYN_SENT.
	 * First check the RST flag and sequence number since reset segments
	 * are exempt from the timestamp and connection count tests.  This
	 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
	 * below which allowed reset segments in half the sequence space
	 * to fall through and be processed (which gives forged reset
	 * segments with a random sequence number a 50 percent chance of
	 * killing a connection).
	 * Then check timestamp, if present.
	 * Then check the connection count, if present.
	 * Then check that at least some bytes of segment are within
	 * receive window.  If segment begins before rcv_nxt,
	 * drop leading data (and SYN); if nothing left, just ack.
	 */

	/*
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 *
	 *   Note: this does not take into account delayed ACKs, so
	 *     we should test against last_ack_sent instead of rcv_nxt.
	 *     The sequence number in the reset segment is normally an
	 *     echo of our outgoing acknowledgement numbers, but some hosts
	 *     send a reset with the sequence number at the rightmost edge
	 *     of our receive window, and we have to handle this case.
	 *
	 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
	 *   that brute force RST attacks are possible.  To combat this,
	 *   we use a much stricter check while in the ESTABLISHED state,
	 *   only accepting RSTs where the sequence number is equal to
	 *   last_ack_sent.  In all other states (the states in which a
	 *   RST is more likely), the more permissive check is used.
	 * If we have multiple segments in flight, the initial reset
	 * segment sequence numbers will be to the left of last_ack_sent,
	 * but they will eventually catch up.
	 * In any case, it never made sense to trim reset segments to
	 * fit the receive window since RFC 1122 says:
	 *   4.2.2.12  RST Segment: RFC-793 Section 3.4
	 *
	 *    A TCP SHOULD allow a received RST segment to include data.
	 *
	 *    DISCUSSION
	 *         It has been suggested that a RST segment could contain
	 *         ASCII text that encoded and explained the cause of the
	 *         RST.  No standard has yet been established for such
	 *         data.
	 *
	 * If the reset segment passes the sequence number test examine
	 * the state:
	 *    SYN_RECEIVED STATE:
	 *	If passive open, return to LISTEN state.
	 *	If active open, inform user that connection was refused.
	 *    ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
	 *	Inform user that connection was reset, and close tcb.
	 *    CLOSING, LAST_ACK STATES:
	 *	Close the tcb.
	 *    TIME_WAIT STATE:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *	    RFC 1337.
	 */
	if (thflags & TH_RST) {
		if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
			switch (tp->t_state) {

			case TCPS_SYN_RECEIVED:
				so->so_error = ECONNREFUSED;
				goto close;

			case TCPS_ESTABLISHED:
				if (V_tcp_insecure_rst == 0 &&
				    !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) &&
				    SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) &&
				    !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
				    SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) {
					TCPSTAT_INC(tcps_badrst);
					goto drop;
				}
				/* FALLTHROUGH */
			case TCPS_FIN_WAIT_1:
			case TCPS_FIN_WAIT_2:
			case TCPS_CLOSE_WAIT:
				so->so_error = ECONNRESET;
			close:
				KASSERT(ti_locked == TI_WLOCKED,
				    ("tcp_do_segment: TH_RST 1 ti_locked %d",
				    ti_locked));
				INP_INFO_WLOCK_ASSERT(&V_tcbinfo);

				tp->t_state = TCPS_CLOSED;
				TCPSTAT_INC(tcps_drops);
				tp = tcp_close(tp);
				break;

			case TCPS_CLOSING:
			case TCPS_LAST_ACK:
				KASSERT(ti_locked == TI_WLOCKED,
				    ("tcp_do_segment: TH_RST 2 ti_locked %d",
				    ti_locked));
				INP_INFO_WLOCK_ASSERT(&V_tcbinfo);

				tp = tcp_close(tp);
				break;
			}
		}
		goto drop;
	}
2062 * RFC 1323 PAWS: If we have a timestamp reply on this segment
2063 * and it's less than ts_recent, drop it.
2065 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
2066 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
2068 /* Check to see if ts_recent is over 24 days old. */
2069 if (ticks - tp->ts_recent_age > TCP_PAWS_IDLE) {
2071 * Invalidate ts_recent. If this segment updates
2072 * ts_recent, the age will be reset later and ts_recent
2073 * will get a valid value. If it does not, setting
2074 * ts_recent to zero will at least satisfy the
2075 * requirement that zero be placed in the timestamp
2076 * echo reply when ts_recent isn't valid. The
2077 * age isn't reset until we get a valid ts_recent
2078 * because we don't want out-of-order segments to be
2079 * dropped when ts_recent is old.
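/*
 * Example: TCP_PAWS_IDLE is 24 days expressed in ticks
 * (24 * 24 * 60 * 60 * hz), so with hz = 1000 a connection idle
 * for more than roughly 2.07 billion ms invalidates ts_recent here
 * instead of dropping a segment from a peer whose timestamp clock
 * wrapped or was reset.
 */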
2083 TCPSTAT_INC(tcps_rcvduppack);
2084 TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
2085 TCPSTAT_INC(tcps_pawsdrop);
2086 if (tlen)
2087 goto dropafterack;
2088 goto drop;
2093 * In the SYN-RECEIVED state, validate that the packet belongs to
2094 * this connection before trimming the data to fit the receive
2095 * window. Check the sequence number versus IRS since we know
2096 * the sequence numbers haven't wrapped. This is a partial fix
2097 * for the "LAND" DoS attack.
2099 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
2100 rstreason = BANDLIM_RST_OPENPORT;
2101 goto dropwithreset;
2104 todrop = tp->rcv_nxt - th->th_seq;
2107 * If this is a duplicate SYN for our current connection,
2108 * advance over it and pretend it's not a SYN.
2110 if (thflags & TH_SYN && th->th_seq == tp->irs) {
2120 * Following if statement from Stevens, vol. 2, p. 960.
2122 if (todrop > tlen
2123 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
2125 * Any valid FIN must be to the left of the window.
2126 * At this point the FIN must be a duplicate or out
2127 * of sequence; drop it.
2132 * Send an ACK to resynchronize and drop any data.
2133 * But keep on processing for RST or ACK.
2135 tp->t_flags |= TF_ACKNOW;
2136 todrop = tlen;
2137 TCPSTAT_INC(tcps_rcvduppack);
2138 TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
2139 } else {
2140 TCPSTAT_INC(tcps_rcvpartduppack);
2141 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
2143 drop_hdrlen += todrop; /* drop from the top afterwards */
2144 th->th_seq += todrop;
2146 if (th->th_urp > todrop)
2147 th->th_urp -= todrop;
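/*
 * Example of the trimming above, with illustrative numbers: if
 * rcv_nxt = 2000, th_seq = 1500 and tlen = 1000, then todrop = 500.
 * The first 500 bytes duplicate data we already have, so th_seq
 * advances to 2000, drop_hdrlen grows by 500 and only the 500 new
 * bytes remain to be processed.
 */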
2155 * If new data are received on a connection after the
2156 * user processes are gone, then RST the other end.
2158 if ((so->so_state & SS_NOFDREF) &&
2159 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
2162 KASSERT(ti_locked == TI_WLOCKED, ("%s: SS_NOFDREF && "
2163 "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked));
2164 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2166 if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) {
2167 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data after socket "
2168 "was closed, sending RST and removing tcpcb\n",
2169 s, __func__, tcpstates[tp->t_state], tlen);
2173 TCPSTAT_INC(tcps_rcvafterclose);
2174 rstreason = BANDLIM_UNLIMITED;
2175 goto dropwithreset;
2179 * If segment ends after window, drop trailing data
2180 * (and PUSH and FIN); if nothing left, just ACK.
2182 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
2183 if (todrop > 0) {
2184 TCPSTAT_INC(tcps_rcvpackafterwin);
2185 if (todrop >= tlen) {
2186 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
2188 * If window is closed can only take segments at
2189 * window edge, and have to drop data and PUSH from
2190 * incoming segments. Continue processing, but
2191 * remember to ack. Otherwise, drop segment
2192 * and ack.
2194 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
2195 tp->t_flags |= TF_ACKNOW;
2196 TCPSTAT_INC(tcps_rcvwinprobe);
2197 } else
2198 goto dropafterack;
2199 } else
2200 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2201 m_adj(m, -todrop);
2202 tlen -= todrop;
2203 thflags &= ~(TH_PUSH|TH_FIN);
2207 * If last ACK falls within this segment's sequence numbers,
2208 * record its timestamp.
2209 * NOTE:
2210 * 1) That the test incorporates suggestions from the latest
2211 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
2212 * 2) That updating only on newer timestamps interferes with
2213 * our earlier PAWS tests, so this check should be solely
2214 * predicated on the sequence space of this segment.
2215 * 3) That we modify the segment boundary check to be
2216 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
2217 * instead of RFC1323's
2218 * Last.ACK.Sent < SEG.SEQ + SEG.Len,
2219 * This modified check allows us to overcome RFC1323's
2220 * limitations as described in Stevens TCP/IP Illustrated
2221 * Vol. 2 p.869. In such cases, we can still calculate the
2222 * RTT correctly when RCV.NXT == Last.ACK.Sent.
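/*
 * Example: a pure in-sequence ACK (SEG.Len == 0, no SYN/FIN) with
 * SEG.SEQ == Last.ACK.Sent fails RFC1323's strict '<' test but
 * passes the '<=' form used below, so its timestamp is taken and
 * an RTT sample remains possible when RCV.NXT == Last.ACK.Sent.
 */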
2224 if ((to.to_flags & TOF_TS) != 0 &&
2225 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
2226 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
2227 ((thflags & (TH_SYN|TH_FIN)) != 0))) {
2228 tp->ts_recent_age = ticks;
2229 tp->ts_recent = to.to_tsval;
2233 * If a SYN is in the window, then this is an
2234 * error and we send an RST and drop the connection.
2236 if (thflags & TH_SYN) {
2237 KASSERT(ti_locked == TI_WLOCKED,
2238 ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked));
2239 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2241 tp = tcp_drop(tp, ECONNRESET);
2242 rstreason = BANDLIM_UNLIMITED;
2247 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
2248 * flag is on (half-synchronized state), then queue data for
2249 * later processing; else drop segment and return.
2251 if ((thflags & TH_ACK) == 0) {
2252 if (tp->t_state == TCPS_SYN_RECEIVED ||
2253 (tp->t_flags & TF_NEEDSYN))
2254 goto step6;
2255 else if (tp->t_flags & TF_ACKNOW)
2256 goto dropafterack;
2257 else
2258 goto drop;
2264 switch (tp->t_state) {
2267 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
2268 * ESTABLISHED state and continue processing.
2269 * The ACK was checked above.
2271 case TCPS_SYN_RECEIVED:
2273 TCPSTAT_INC(tcps_connects);
2275 /* Do window scaling? */
2276 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2277 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2278 tp->rcv_scale = tp->request_r_scale;
2279 tp->snd_wnd = tiwin;
2283 * SYN-RECEIVED -> ESTABLISHED
2284 * SYN-RECEIVED* -> FIN-WAIT-1
2286 tp->t_starttime = ticks;
2287 if (tp->t_flags & TF_NEEDFIN) {
2288 tp->t_state = TCPS_FIN_WAIT_1;
2289 tp->t_flags &= ~TF_NEEDFIN;
2291 tp->t_state = TCPS_ESTABLISHED;
2293 tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
2296 * If segment contains data or ACK, will call tcp_reass()
2297 * later; if not, do so now to pass queued data to user.
2299 if (tlen == 0 && (thflags & TH_FIN) == 0)
2300 (void) tcp_reass(tp, (struct tcphdr *)0, 0,
2301 (struct mbuf *)0);
2302 tp->snd_wl1 = th->th_seq - 1;
2306 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
2307 * ACKs. If the ack is in the range
2308 * tp->snd_una < th->th_ack <= tp->snd_max
2309 * then advance tp->snd_una to th->th_ack and drop
2310 * data from the retransmission queue. If this ACK reflects
2311 * more up to date window information we update our window information.
2313 case TCPS_ESTABLISHED:
2314 case TCPS_FIN_WAIT_1:
2315 case TCPS_FIN_WAIT_2:
2316 case TCPS_CLOSE_WAIT:
2319 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2320 TCPSTAT_INC(tcps_rcvacktoomuch);
2323 if ((tp->t_flags & TF_SACK_PERMIT) &&
2324 ((to.to_flags & TOF_SACK) ||
2325 !TAILQ_EMPTY(&tp->snd_holes)))
2326 tcp_sack_doack(tp, &to, th->th_ack);
2328 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
2329 hhook_run_tcp_est_in(tp, th, &to);
2331 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
2332 if (tlen == 0 && tiwin == tp->snd_wnd) {
2333 TCPSTAT_INC(tcps_rcvdupack);
2335 * If we have outstanding data (other than
2336 * a window probe), this is a completely
2337 * duplicate ack (ie, window info didn't
2338 * change), the ack is the biggest we've
2339 * seen and we've seen exactly our rexmt
2340 * threshold of them, assume a packet
2341 * has been dropped and retransmit it.
2342 * Kludge snd_nxt & the congestion
2343 * window so we send only this one
2346 * We know we're losing at the current
2347 * window size so do congestion avoidance
2348 * (set ssthresh to half the current window
2349 * and pull our congestion window back to
2350 * the new ssthresh).
2352 * Dup acks mean that packets have left the
2353 * network (they're now cached at the receiver)
2354 * so bump cwnd by the amount in the receiver
2355 * to keep a constant cwnd packets in the
2356 * network.
2358 * When using TCP ECN, notify the peer that
2359 * we reduced the cwnd.
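/*
 * Concretely, with the default tcprexmtthresh of 3: dup ACKs one
 * and two may trigger limited transmit (RFC3042) below, the third
 * triggers fast retransmit and enters recovery, and each further
 * dup ACK inflates cwnd by one segment to keep data flowing while
 * the loss is repaired.
 */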
2361 if (!tcp_timer_active(tp, TT_REXMT) ||
2362 th->th_ack != tp->snd_una)
2363 tp->t_dupacks = 0;
2364 else if (++tp->t_dupacks > tcprexmtthresh ||
2365 IN_FASTRECOVERY(tp->t_flags)) {
2366 cc_ack_received(tp, th, CC_DUPACK);
2367 if ((tp->t_flags & TF_SACK_PERMIT) &&
2368 IN_FASTRECOVERY(tp->t_flags)) {
2372 * Compute the amount of data in flight first.
2373 * We can inject new data into the pipe iff
2374 * we have less than 1/2 the original window's
2375 * worth of data in flight.
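/*
 * Example with illustrative numbers: snd_nxt - snd_fack = 20000
 * bytes and sack_bytes_rexmit = 3000 give awnd = 23000. If
 * snd_ssthresh = 32768, cwnd may grow by one t_maxseg per dup ACK
 * but is clamped to ssthresh below.
 */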
2377 awnd = (tp->snd_nxt - tp->snd_fack) +
2378 tp->sackhint.sack_bytes_rexmit;
2379 if (awnd < tp->snd_ssthresh) {
2380 tp->snd_cwnd += tp->t_maxseg;
2381 if (tp->snd_cwnd > tp->snd_ssthresh)
2382 tp->snd_cwnd = tp->snd_ssthresh;
2385 tp->snd_cwnd += tp->t_maxseg;
2386 (void) tcp_output(tp);
2388 } else if (tp->t_dupacks == tcprexmtthresh) {
2389 tcp_seq onxt = tp->snd_nxt;
2392 * If we're doing sack, check to
2393 * see if we're already in sack
2394 * recovery. If we're not doing sack,
2395 * check to see if we're in newreno
2398 if (tp->t_flags & TF_SACK_PERMIT) {
2399 if (IN_FASTRECOVERY(tp->t_flags)) {
2404 if (SEQ_LEQ(th->th_ack,
2410 /* Congestion signal before ack. */
2411 cc_cong_signal(tp, th, CC_NDUPACK);
2412 cc_ack_received(tp, th, CC_DUPACK);
2413 tcp_timer_activate(tp, TT_REXMT, 0);
2415 if (tp->t_flags & TF_SACK_PERMIT) {
2416 TCPSTAT_INC(
2417 tcps_sack_recovery_episode);
2418 tp->sack_newdata = tp->snd_nxt;
2419 tp->snd_cwnd = tp->t_maxseg;
2420 (void) tcp_output(tp);
2423 tp->snd_nxt = th->th_ack;
2424 tp->snd_cwnd = tp->t_maxseg;
2425 (void) tcp_output(tp);
2426 KASSERT(tp->snd_limited <= 2,
2427 ("%s: tp->snd_limited too big",
2429 tp->snd_cwnd = tp->snd_ssthresh +
2430 tp->t_maxseg *
2431 (tp->t_dupacks - tp->snd_limited);
2432 if (SEQ_GT(onxt, tp->snd_nxt))
2433 tp->snd_nxt = onxt;
2435 } else if (V_tcp_do_rfc3042) {
2436 cc_ack_received(tp, th, CC_DUPACK);
2437 u_long oldcwnd = tp->snd_cwnd;
2438 tcp_seq oldsndmax = tp->snd_max;
2441 KASSERT(tp->t_dupacks == 1 ||
2443 ("%s: dupacks not 1 or 2",
2445 if (tp->t_dupacks == 1)
2446 tp->snd_limited = 0;
2447 tp->snd_cwnd =
2448 (tp->snd_nxt - tp->snd_una) +
2449 (tp->t_dupacks - tp->snd_limited) *
2450 tp->t_maxseg;
2451 (void) tcp_output(tp);
2452 sent = tp->snd_max - oldsndmax;
2453 if (sent > tp->t_maxseg) {
2454 KASSERT((tp->t_dupacks == 2 &&
2455 tp->snd_limited == 0) ||
2456 (sent == tp->t_maxseg + 1 &&
2457 tp->t_flags & TF_SENTFIN),
2458 ("%s: sent too much",
2460 tp->snd_limited = 2;
2461 } else if (sent > 0)
2462 ++tp->snd_limited;
2463 tp->snd_cwnd = oldcwnd;
2471 KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
2472 ("%s: th_ack <= snd_una", __func__));
2475 * If the congestion window was inflated to account
2476 * for the other side's cached packets, retract it.
2478 if (IN_FASTRECOVERY(tp->t_flags)) {
2479 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2480 if (tp->t_flags & TF_SACK_PERMIT)
2481 tcp_sack_partialack(tp, th);
2482 else
2483 tcp_newreno_partial_ack(tp, th);
2485 cc_post_recovery(tp, th);
2489 * If we reach this point, ACK is not a duplicate,
2490 * i.e., it ACKs something we sent.
2492 if (tp->t_flags & TF_NEEDSYN) {
2494 * T/TCP: Connection was half-synchronized, and our
2495 * SYN has been ACK'd (so connection is now fully
2496 * synchronized). Go to non-starred state,
2497 * increment snd_una for ACK of SYN, and check if
2498 * we can do window scaling.
2500 tp->t_flags &= ~TF_NEEDSYN;
2502 /* Do window scaling? */
2503 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2504 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2505 tp->rcv_scale = tp->request_r_scale;
2506 /* Send window already scaled. */
2511 INP_INFO_LOCK_ASSERT(&V_tcbinfo);
2512 KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED,
2513 ("tcp_input: process_ACK ti_locked %d", ti_locked));
2514 INP_WLOCK_ASSERT(tp->t_inpcb);
2516 acked = BYTES_THIS_ACK(tp, th);
2517 TCPSTAT_INC(tcps_rcvackpack);
2518 TCPSTAT_ADD(tcps_rcvackbyte, acked);
2521 * If we just performed our first retransmit, and the ACK
2522 * arrives within our recovery window, then it was a mistake
2523 * to do the retransmit in the first place. Recover our
2524 * original cwnd and ssthresh, and proceed to transmit where
2525 * we left off.
2527 if (tp->t_rxtshift == 1 && tp->t_flags & TF_PREVVALID &&
2528 (int)(ticks - tp->t_badrxtwin) < 0)
2529 cc_cong_signal(tp, th, CC_RTO_ERR);
2532 * If we have a timestamp reply, update smoothed
2533 * round trip time. If no timestamp is present but
2534 * transmit timer is running and timed sequence
2535 * number was acked, update smoothed round trip time.
2536 * Since we now have an rtt measurement, cancel the
2537 * timer backoff (cf., Phil Karn's retransmit alg.).
2538 * Recompute the initial retransmit timer.
2540 * Some boxes send broken timestamp replies
2541 * during the SYN+ACK phase, ignore
2542 * timestamps of 0 or we could calculate a
2543 * huge RTT and blow up the retransmit timer.
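/*
 * Example: if to.to_tsecr echoes a value we stamped 120 ticks ago,
 * the sample passed to tcp_xmit_timer() is 121 ticks; the +1 keeps
 * a same-tick echo from yielding an RTT of 0. A tsecr of 0 is
 * ignored per the note above.
 */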
2545 if ((to.to_flags & TOF_TS) != 0 &&
2546 to.to_tsecr) {
2547 if (!tp->t_rttlow || tp->t_rttlow > ticks - to.to_tsecr)
2548 tp->t_rttlow = ticks - to.to_tsecr;
2549 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1);
2550 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2551 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
2552 tp->t_rttlow = ticks - tp->t_rtttime;
2553 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2555 tcp_xmit_bandwidth_limit(tp, th->th_ack);
2558 * If all outstanding data is acked, stop retransmit
2559 * timer and remember to restart (more output or persist).
2560 * If there is more data to be acked, restart retransmit
2561 * timer, using current (possibly backed-off) value.
2563 if (th->th_ack == tp->snd_max) {
2564 tcp_timer_activate(tp, TT_REXMT, 0);
2565 needoutput = 1;
2566 } else if (!tcp_timer_active(tp, TT_PERSIST))
2567 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
2570 * If no data (only SYN) was ACK'd,
2571 * skip rest of ACK processing.
2573 if (acked == 0)
2574 goto step6;
2577 * Let the congestion control algorithm update congestion
2578 * control related information. This typically means increasing
2579 * the congestion window.
2581 cc_ack_received(tp, th, CC_ACK);
2583 SOCKBUF_LOCK(&so->so_snd);
2584 if (acked > so->so_snd.sb_cc) {
2585 tp->snd_wnd -= so->so_snd.sb_cc;
2586 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc);
2587 ourfinisacked = 1;
2588 } else {
2589 sbdrop_locked(&so->so_snd, acked);
2590 tp->snd_wnd -= acked;
2591 ourfinisacked = 0;
2593 /* NB: sowwakeup_locked() does an implicit unlock. */
2594 sowwakeup_locked(so);
2595 /* Detect una wraparound. */
2596 if (!IN_RECOVERY(tp->t_flags) &&
2597 SEQ_GT(tp->snd_una, tp->snd_recover) &&
2598 SEQ_LEQ(th->th_ack, tp->snd_recover))
2599 tp->snd_recover = th->th_ack - 1;
2600 /* XXXLAS: Can this be moved up into cc_post_recovery? */
2601 if (IN_RECOVERY(tp->t_flags) &&
2602 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
2603 EXIT_RECOVERY(tp->t_flags);
2605 tp->snd_una = th->th_ack;
2606 if (tp->t_flags & TF_SACK_PERMIT) {
2607 if (SEQ_GT(tp->snd_una, tp->snd_recover))
2608 tp->snd_recover = tp->snd_una;
2610 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2611 tp->snd_nxt = tp->snd_una;
2613 switch (tp->t_state) {
2616 * In FIN_WAIT_1 STATE in addition to the processing
2617 * for the ESTABLISHED state if our FIN is now acknowledged
2618 * then enter FIN_WAIT_2.
2620 case TCPS_FIN_WAIT_1:
2621 if (ourfinisacked) {
2623 * If we can't receive any more
2624 * data, then closing user can proceed.
2625 * Starting the timer is contrary to the
2626 * specification, but if we don't get a FIN
2627 * we'll hang forever.
2629 * XXXjl:
2630 * we should release the tp also, and use a
2631 * compressed state.
2633 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2636 soisdisconnected(so);
2637 timeout = (tcp_fast_finwait2_recycle) ?
2638 tcp_finwait2_timeout : tcp_maxidle;
2639 tcp_timer_activate(tp, TT_2MSL, timeout);
2641 tp->t_state = TCPS_FIN_WAIT_2;
2646 * In CLOSING STATE in addition to the processing for
2647 * the ESTABLISHED state if the ACK acknowledges our FIN
2648 * then enter the TIME-WAIT state, otherwise ignore
2652 if (ourfinisacked) {
2653 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2654 tcp_twstart(tp);
2655 INP_INFO_WUNLOCK(&V_tcbinfo);
2656 m_freem(m);
2657 return;
2662 * In LAST_ACK, we may still be waiting for data to drain
2663 * and/or to be acked, as well as for the ack of our FIN.
2664 * If our FIN is now acknowledged, delete the TCB,
2665 * enter the closed state and return.
2668 if (ourfinisacked) {
2669 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2670 tp = tcp_close(tp);
2671 goto drop;
2678 INP_INFO_LOCK_ASSERT(&V_tcbinfo);
2679 KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED,
2680 ("tcp_do_segment: step6 ti_locked %d", ti_locked));
2681 INP_WLOCK_ASSERT(tp->t_inpcb);
2684 * Update window information.
2685 * Don't look at window if no ACK: TACs send garbage on first SYN.
2687 if ((thflags & TH_ACK) &&
2688 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
2689 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
2690 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
2691 /* keep track of pure window updates */
2692 if (tlen == 0 &&
2693 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
2694 TCPSTAT_INC(tcps_rcvwinupd);
2695 tp->snd_wnd = tiwin;
2696 tp->snd_wl1 = th->th_seq;
2697 tp->snd_wl2 = th->th_ack;
2698 if (tp->snd_wnd > tp->max_sndwnd)
2699 tp->max_sndwnd = tp->snd_wnd;
2704 * Process segments with URG.
2706 if ((thflags & TH_URG) && th->th_urp &&
2707 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2709 * This is a kludge, but if we receive and accept
2710 * random urgent pointers, we'll crash in
2711 * soreceive. It's hard to imagine someone
2712 * actually wanting to send this much urgent data.
2714 SOCKBUF_LOCK(&so->so_rcv);
2715 if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
2716 th->th_urp = 0; /* XXX */
2717 thflags &= ~TH_URG; /* XXX */
2718 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
2719 goto dodata; /* XXX */
2722 * If this segment advances the known urgent pointer,
2723 * then mark the data stream. This should not happen
2724 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2725 * a FIN has been received from the remote side.
2726 * In these states we ignore the URG.
2728 * According to RFC961 (Assigned Protocols),
2729 * the urgent pointer points to the last octet
2730 * of urgent data. We continue, however,
2731 * to consider it to indicate the first octet
2732 * of data past the urgent section as the original
2733 * spec states (in one of two places).
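/*
 * Example: a segment with th_seq = 5000 and th_urp = 10 advances
 * rcv_up to 5010; under the interpretation above, octet 5009 is
 * the out-of-band byte and 5010 is the first octet of ordinary
 * data past the urgent section.
 */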
2735 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
2736 tp->rcv_up = th->th_seq + th->th_urp;
2737 so->so_oobmark = so->so_rcv.sb_cc +
2738 (tp->rcv_up - tp->rcv_nxt) - 1;
2739 if (so->so_oobmark == 0)
2740 so->so_rcv.sb_state |= SBS_RCVATMARK;
2742 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
2744 SOCKBUF_UNLOCK(&so->so_rcv);
2746 * Remove out of band data so it doesn't get presented to user.
2747 * This can happen independent of advancing the URG pointer,
2748 * but if two URG's are pending at once, some out-of-band
2749 * data may creep in... ick.
2751 if (th->th_urp <= (u_long)tlen &&
2752 !(so->so_options & SO_OOBINLINE)) {
2753 /* hdr drop is delayed */
2754 tcp_pulloutofband(so, th, m, drop_hdrlen);
2758 * If no out of band data is expected,
2759 * pull receive urgent pointer along
2760 * with the receive window.
2762 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
2763 tp->rcv_up = tp->rcv_nxt;
2766 INP_INFO_LOCK_ASSERT(&V_tcbinfo);
2767 KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED,
2768 ("tcp_do_segment: dodata ti_locked %d", ti_locked));
2769 INP_WLOCK_ASSERT(tp->t_inpcb);
2772 * Process the segment text, merging it into the TCP sequencing queue,
2773 * and arranging for acknowledgment of receipt if necessary.
2774 * This process logically involves adjusting tp->rcv_wnd as data
2775 * is presented to the user (this happens in tcp_usrreq.c,
2776 * case PRU_RCVD). If a FIN has already been received on this
2777 * connection then we just ignore the text.
2779 if ((tlen || (thflags & TH_FIN)) &&
2780 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2781 tcp_seq save_start = th->th_seq;
2782 m_adj(m, drop_hdrlen); /* delayed header drop */
2784 * Insert segment which includes th into TCP reassembly queue
2785 * with control block tp. Set thflags to whether reassembly now
2786 * includes a segment with FIN. This handles the common case
2787 * inline (segment is the next to be received on an established
2788 * connection, and the queue is empty), avoiding linkage into
2789 * and removal from the queue and repetition of various
2790 * conversions.
2791 * Set DELACK for segments received in order, but ack
2792 * immediately when segments are out of order (so
2793 * fast retransmit can work).
2795 if (th->th_seq == tp->rcv_nxt &&
2796 LIST_EMPTY(&tp->t_segq) &&
2797 TCPS_HAVEESTABLISHED(tp->t_state)) {
2798 if (DELAY_ACK(tp))
2799 tp->t_flags |= TF_DELACK;
2800 else
2801 tp->t_flags |= TF_ACKNOW;
2802 tp->rcv_nxt += tlen;
2803 thflags = th->th_flags & TH_FIN;
2804 TCPSTAT_INC(tcps_rcvpack);
2805 TCPSTAT_ADD(tcps_rcvbyte, tlen);
2807 SOCKBUF_LOCK(&so->so_rcv);
2808 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
2809 m_freem(m);
2810 else
2811 sbappendstream_locked(&so->so_rcv, m);
2812 /* NB: sorwakeup_locked() does an implicit unlock. */
2813 sorwakeup_locked(so);
2816 * XXX: Due to the header drop above "th" is
2817 * theoretically invalid by now. Fortunately
2818 * m_adj() doesn't actually free any mbufs
2819 * when trimming from the head.
2821 thflags = tcp_reass(tp, th, &tlen, m);
2822 tp->t_flags |= TF_ACKNOW;
2824 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT))
2825 tcp_update_sack_list(tp, save_start, save_start + tlen);
2828 * Note the amount of data that peer has sent into
2829 * our window, in order to estimate the sender's
2830 * buffer size.
2833 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
2841 * If FIN is received ACK the FIN and let the user know
2842 * that the connection is closing.
2844 if (thflags & TH_FIN) {
2845 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2848 * If connection is half-synchronized
2849 * (ie NEEDSYN flag on) then delay ACK,
2850 * so it may be piggybacked when SYN is sent.
2851 * Otherwise, since we received a FIN then no
2852 * more input can be expected, send ACK now.
2854 if (tp->t_flags & TF_NEEDSYN)
2855 tp->t_flags |= TF_DELACK;
2857 tp->t_flags |= TF_ACKNOW;
2860 switch (tp->t_state) {
2863 * In SYN_RECEIVED and ESTABLISHED STATES
2864 * enter the CLOSE_WAIT state.
2866 case TCPS_SYN_RECEIVED:
2867 tp->t_starttime = ticks;
2869 case TCPS_ESTABLISHED:
2870 tp->t_state = TCPS_CLOSE_WAIT;
2874 * If still in FIN_WAIT_1 STATE FIN has not been acked so
2875 * enter the CLOSING state.
2877 case TCPS_FIN_WAIT_1:
2878 tp->t_state = TCPS_CLOSING;
2882 * In FIN_WAIT_2 state enter the TIME_WAIT state,
2883 * starting the time-wait timer, turning off the other
2884 * standard timers.
2886 case TCPS_FIN_WAIT_2:
2887 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2888 KASSERT(ti_locked == TI_WLOCKED, ("%s: dodata "
2889 "TCP_FIN_WAIT_2 ti_locked: %d", __func__,
2893 INP_INFO_WUNLOCK(&V_tcbinfo);
2897 if (ti_locked == TI_RLOCKED)
2898 INP_INFO_RUNLOCK(&V_tcbinfo);
2899 else if (ti_locked == TI_WLOCKED)
2900 INP_INFO_WUNLOCK(&V_tcbinfo);
2901 else
2902 panic("%s: dodata epilogue ti_locked %d", __func__,
2903 ti_locked);
2904 ti_locked = TI_UNLOCKED;
2907 if (so->so_options & SO_DEBUG)
2908 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
2909 &tcp_savetcp, 0);
2913 * Return any desired output.
2915 if (needoutput || (tp->t_flags & TF_ACKNOW))
2916 (void) tcp_output(tp);
2919 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d",
2920 __func__, ti_locked));
2921 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
2922 INP_WLOCK_ASSERT(tp->t_inpcb);
2924 if (tp->t_flags & TF_DELACK) {
2925 tp->t_flags &= ~TF_DELACK;
2926 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
2928 INP_WUNLOCK(tp->t_inpcb);
2932 KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED,
2933 ("tcp_do_segment: dropafterack ti_locked %d", ti_locked));
2936 * Generate an ACK dropping incoming segment if it occupies
2937 * sequence space, where the ACK reflects our state.
2939 * We can now skip the test for the RST flag since all
2940 * paths to this code happen after packets containing
2941 * RST have been dropped.
2943 * In the SYN-RECEIVED state, don't send an ACK unless the
2944 * segment we received passes the SYN-RECEIVED ACK test.
2945 * If it fails send a RST. This breaks the loop in the
2946 * "LAND" DoS attack, and also prevents an ACK storm
2947 * between two listening ports that have been sent forged
2948 * SYN segments, each with the source address of the other.
2950 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
2951 (SEQ_GT(tp->snd_una, th->th_ack) ||
2952 SEQ_GT(th->th_ack, tp->snd_max)) ) {
2953 rstreason = BANDLIM_RST_OPENPORT;
2954 goto dropwithreset;
2957 if (so->so_options & SO_DEBUG)
2958 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
2959 &tcp_savetcp, 0);
2961 if (ti_locked == TI_RLOCKED)
2962 INP_INFO_RUNLOCK(&V_tcbinfo);
2963 else if (ti_locked == TI_WLOCKED)
2964 INP_INFO_WUNLOCK(&V_tcbinfo);
2965 else
2966 panic("%s: dropafterack epilogue ti_locked %d", __func__,
2967 ti_locked);
2968 ti_locked = TI_UNLOCKED;
2970 tp->t_flags |= TF_ACKNOW;
2971 (void) tcp_output(tp);
2972 INP_WUNLOCK(tp->t_inpcb);
2977 if (ti_locked == TI_RLOCKED)
2978 INP_INFO_RUNLOCK(&V_tcbinfo);
2979 else if (ti_locked == TI_WLOCKED)
2980 INP_INFO_WUNLOCK(&V_tcbinfo);
2981 else
2982 panic("%s: dropwithreset ti_locked %d", __func__, ti_locked);
2983 ti_locked = TI_UNLOCKED;
2986 tcp_dropwithreset(m, th, tp, tlen, rstreason);
2987 INP_WUNLOCK(tp->t_inpcb);
2989 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
2993 if (ti_locked == TI_RLOCKED)
2994 INP_INFO_RUNLOCK(&V_tcbinfo);
2995 else if (ti_locked == TI_WLOCKED)
2996 INP_INFO_WUNLOCK(&V_tcbinfo);
2999 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3001 ti_locked = TI_UNLOCKED;
3004 * Drop space held by incoming segment and return.
3007 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
3008 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3009 &tcp_savetcp, 0);
3012 INP_WUNLOCK(tp->t_inpcb);
3017 * Issue RST and make ACK acceptable to originator of segment.
3018 * The mbuf must still include the original packet header.
3021 static void
3022 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
3023 int tlen, int rstreason)
3027 struct ip6_hdr *ip6;
3031 INP_WLOCK_ASSERT(tp->t_inpcb);
3034 /* Don't bother if destination was broadcast/multicast. */
3035 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
3038 if (mtod(m, struct ip *)->ip_v == 6) {
3039 ip6 = mtod(m, struct ip6_hdr *);
3040 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3041 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3043 /* IPv6 anycast check is done at tcp6_input() */
3047 ip = mtod(m, struct ip *);
3048 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3049 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3050 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3051 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3055 /* Perform bandwidth limiting. */
3056 if (badport_bandlim(rstreason) < 0)
3059 /* tcp_respond consumes the mbuf chain. */
3060 if (th->th_flags & TH_ACK) {
3061 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
3062 th->th_ack, TH_RST);
3064 if (th->th_flags & TH_SYN)
3066 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
3067 (tcp_seq)0, TH_RST|TH_ACK);
3075 * Parse TCP options and place in tcpopt.
3078 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
3083 for (; cnt > 0; cnt -= optlen, cp += optlen) {
3085 if (opt == TCPOPT_EOL)
3087 if (opt == TCPOPT_NOP)
3093 if (optlen < 2 || optlen > cnt)
3097 case TCPOPT_MAXSEG:
3098 if (optlen != TCPOLEN_MAXSEG)
3099 continue;
3100 if (!(flags & TO_SYN))
3101 continue;
3102 to->to_flags |= TOF_MSS;
3103 bcopy((char *)cp + 2,
3104 (char *)&to->to_mss, sizeof(to->to_mss));
3105 to->to_mss = ntohs(to->to_mss);
3107 case TCPOPT_WINDOW:
3108 if (optlen != TCPOLEN_WINDOW)
3109 continue;
3110 if (!(flags & TO_SYN))
3111 continue;
3112 to->to_flags |= TOF_SCALE;
3113 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
3115 case TCPOPT_TIMESTAMP:
3116 if (optlen != TCPOLEN_TIMESTAMP)
3117 continue;
3118 to->to_flags |= TOF_TS;
3119 bcopy((char *)cp + 2,
3120 (char *)&to->to_tsval, sizeof(to->to_tsval));
3121 to->to_tsval = ntohl(to->to_tsval);
3122 bcopy((char *)cp + 6,
3123 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
3124 to->to_tsecr = ntohl(to->to_tsecr);
3126 #ifdef TCP_SIGNATURE
3128 * XXX In order to reply to a host which has set the
3129 * TCP_SIGNATURE option in its initial SYN, we have to
3130 * record the fact that the option was observed here
3131 * for the syncache code to perform the correct response.
3133 case TCPOPT_SIGNATURE:
3134 if (optlen != TCPOLEN_SIGNATURE)
3135 continue;
3136 to->to_flags |= TOF_SIGNATURE;
3137 to->to_signature = cp + 2;
3140 case TCPOPT_SACK_PERMITTED:
3141 if (optlen != TCPOLEN_SACK_PERMITTED)
3142 continue;
3143 if (!(flags & TO_SYN))
3144 continue;
3145 if (!V_tcp_do_sack)
3146 continue;
3147 to->to_flags |= TOF_SACKPERM;
3150 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
3154 to->to_flags |= TOF_SACK;
3155 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
3156 to->to_sacks = cp + 2;
3157 TCPSTAT_INC(tcps_sack_rcv_blocks);
3166 * Pull out of band byte out of a segment so
3167 * it doesn't appear in the user's data queue.
3168 * It is still reflected in the segment length for
3169 * sequencing purposes.
3172 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
3175 int cnt = off + th->th_urp - 1;
3178 if (m->m_len > cnt) {
3179 char *cp = mtod(m, caddr_t) + cnt;
3180 struct tcpcb *tp = sototcpcb(so);
3182 INP_WLOCK_ASSERT(tp->t_inpcb);
3185 tp->t_oobflags |= TCPOOB_HAVEDATA;
3186 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
3188 if (m->m_flags & M_PKTHDR)
3197 panic("tcp_pulloutofband");
3201 * Collect new round-trip time estimate
3202 * and update averages and current timeout.
3205 tcp_xmit_timer(struct tcpcb *tp, int rtt)
3209 INP_WLOCK_ASSERT(tp->t_inpcb);
3211 TCPSTAT_INC(tcps_rttupdated);
3213 if (tp->t_srtt != 0) {
3215 * srtt is stored as fixed point with 5 bits after the
3216 * binary point (i.e., scaled by 32). The following magic
3217 * is equivalent to the smoothing algorithm in rfc793 with
3218 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
3219 * point). Adjust rtt to origin 0.
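/*
 * Worked example with illustrative numbers: t_srtt = 3200
 * (100 ticks << TCP_RTT_SHIFT) and a new sample rtt = 132 ticks
 * give delta = ((132 - 1) << 2) - (3200 >> 3) = 524 - 400 = 124,
 * so t_srtt becomes 3324, a smoothed estimate of roughly 104
 * ticks.
 */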
3221 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
3222 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
3224 if ((tp->t_srtt += delta) <= 0)
3228 * We accumulate a smoothed rtt variance (actually, a
3229 * smoothed mean difference), then set the retransmit
3230 * timer to smoothed rtt + 4 times the smoothed variance.
3231 * rttvar is stored as fixed point with 4 bits after the
3232 * binary point (scaled by 16). The following is
3233 * equivalent to rfc793 smoothing with an alpha of .75
3234 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
3235 * rfc793's wired-in beta.
3239 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
3240 if ((tp->t_rttvar += delta) <= 0)
3242 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
3243 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3246 * No rtt measurement yet - use the unsmoothed rtt.
3247 * Set the variance to half the rtt (so our first
3248 * retransmit happens at 3*rtt).
3250 tp->t_srtt = rtt << TCP_RTT_SHIFT;
3251 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
3252 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3258 * the retransmit should happen at rtt + 4 * rttvar.
3259 * Because of the way we do the smoothing, srtt and rttvar
3260 * will each average +1/2 tick of bias. When we compute
3261 * the retransmit timer, we want 1/2 tick of rounding and
3262 * 1 extra tick because of +-1/2 tick uncertainty in the
3263 * firing of the timer. The bias will give us exactly the
3264 * 1.5 tick we need. But, because the bias is
3265 * statistical, we have to test that we don't drop below
3266 * the minimum feasible timer (which is 2 ticks).
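/*
 * Example, assuming hz = 1000 (one tick = 1 ms): srtt of about
 * 100 ms and rttvar of about 20 ms produce a retransmit timeout
 * near 100 + 4 * 20 = 180 ms, bounded below by
 * max(t_rttmin, rtt + 2) and above by TCPTV_REXMTMAX.
 */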
3268 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
3269 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
3272 * We received an ack for a packet that wasn't retransmitted;
3273 * it is probably safe to discard any error indications we've
3274 * received recently. This isn't quite right, but close enough
3275 * for now (a route might have failed after we sent a segment,
3276 * and the return path might not be symmetrical).
3278 tp->t_softerror = 0;
3282 * Determine a reasonable value for maxseg size.
3283 * If the route is known, check route for mtu.
3284 * If none, use an mss that can be handled on the outgoing
3285 * interface without forcing IP to fragment; if bigger than
3286 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
3287 * to utilize large mbufs. If no route is found, route has no mtu,
3288 * or the destination isn't local, use a default, hopefully conservative
3289 * size (usually 512 or the default IP max size, but no more than the mtu
3290 * of the interface), as we can't discover anything about intervening
3291 * gateways or networks. We also initialize the congestion/slow start
3292 * window to be a single segment if the destination isn't local.
3293 * While looking at the routing entry, we also initialize other path-dependent
3294 * parameters from pre-set or cached values in the routing entry.
3296 * Also take into account the space needed for options that we
3297 * send regularly. Make maxseg shorter by that amount to assure
3298 * that we can send maxseg amount of data even when the options
3299 * are present. Store the upper limit of the length of options plus
3300 * data in maxopd.
3302 * In case of T/TCP, we call this routine during implicit connection
3303 * setup as well (offer = -1), to initialize maxseg from the cached
3304 * MSS of our peer.
3306 * NOTE that this routine is only called when we process an incoming
3307 * segment. Outgoing SYN/ACK MSS settings are handled in tcp_mssopt().
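/*
 * Example: an incoming IPv4 SYN advertising MSS 1460 on a path
 * with a 1500 byte MTU gives offer = 1460 and
 * mss = min(1500 - 40, 1460) = 1460; if timestamps were also
 * negotiated, maxseg shrinks by TCPOLEN_TSTAMP_APPA (12) to 1448
 * while maxopd stays at 1460.
 */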
3310 tcp_mss_update(struct tcpcb *tp, int offer,
3311 struct hc_metrics_lite *metricptr, int *mtuflags)
3315 struct inpcb *inp = tp->t_inpcb;
3316 struct hc_metrics_lite metrics;
3317 int origoffer = offer;
3318 #ifdef INET6
3319 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
3320 size_t min_protoh = isipv6 ?
3321 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
3322 sizeof (struct tcpiphdr);
3323 #else
3324 const size_t min_protoh = sizeof(struct tcpiphdr);
3325 #endif
3327 INP_WLOCK_ASSERT(tp->t_inpcb);
3332 maxmtu = tcp_maxmtu6(&inp->inp_inc, mtuflags);
3333 tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt;
3337 maxmtu = tcp_maxmtu(&inp->inp_inc, mtuflags);
3338 tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt;
3342 * No route to sender, stay with default mss and return.
3346 * In case we return early we need to initialize metrics
3347 * to a defined state as tcp_hc_get() would do for us
3348 * if there was no cache hit.
3350 if (metricptr != NULL)
3351 bzero(metricptr, sizeof(struct hc_metrics_lite));
3355 /* What have we got? */
3359 * Offer == 0 means that there was no MSS on the SYN
3360 * segment, in this case we use tcp_mssdflt as
3361 * already assigned to t_maxopd above.
3363 offer = tp->t_maxopd;
3368 * Offer == -1 means that we didn't receive SYN yet.
3374 * Prevent DoS attack with too small MSS. Round up
3375 * to at least minmss.
3377 offer = max(offer, V_tcp_minmss);
3381 * rmx information is now retrieved from tcp_hostcache.
3383 tcp_hc_get(&inp->inp_inc, &metrics);
3384 if (metricptr != NULL)
3385 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite));
3388 * If there's a discovered mtu in the tcp hostcache, use it;
3389 * else, use the link mtu.
3391 if (metrics.rmx_mtu)
3392 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
3396 mss = maxmtu - min_protoh;
3397 if (!V_path_mtu_discovery &&
3398 !in6_localaddr(&inp->in6p_faddr))
3399 mss = min(mss, V_tcp_v6mssdflt);
3403 mss = maxmtu - min_protoh;
3404 if (!V_path_mtu_discovery &&
3405 !in_localaddr(inp->inp_faddr))
3406 mss = min(mss, V_tcp_mssdflt);
3409 * XXX - The above conditional (mss = maxmtu - min_protoh)
3410 * probably violates the TCP spec.
3411 * The problem is that, since we don't know the
3412 * other end's MSS, we are supposed to use a conservative
3413 * default. But, if we do that, then MTU discovery will
3414 * never actually take place, because the conservative
3415 * default is much less than the MTUs typically seen
3416 * on the Internet today. For the moment, we'll sweep
3417 * this under the carpet.
3419 * The conservative default might not actually be a problem
3420 * if the only case this occurs is when sending an initial
3421 * SYN with options and data to a host we've never talked
3422 * to before. Then, they will reply with an MSS value which
3423 * will get recorded and the new parameters should get
3424 * recomputed. For Further Study.
3427 mss = min(mss, offer);
3430 * Sanity check: make sure that maxopd will be large
3431 * enough to allow some data on segments even if all
3432 * the option space is used (40 bytes). Otherwise
3433 * funny things may happen in tcp_output.
3438 * maxopd stores the maximum length of data AND options
3439 * in a segment; maxseg is the amount of data in a normal
3440 * segment. We need to store this value (maxopd) apart
3441 * from maxseg, because now every segment carries options
3442 * and thus we normally have somewhat less data in segments.
3447 * origoffer==-1 indicates that no segments were received yet.
3448 * In this case we just guess.
3450 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
3452 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
3453 mss -= TCPOLEN_TSTAMP_APPA;
3455 #if (MCLBYTES & (MCLBYTES - 1)) == 0
3457 mss &= ~(MCLBYTES-1);
3460 mss = mss / MCLBYTES * MCLBYTES;
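/*
 * Example, assuming MCLBYTES == 2048: an mss of 1460 is below one
 * cluster and is left alone, while a jumbo-derived mss of 9176
 * would be rounded down to 8192 so full segments pack cleanly into
 * mbuf clusters.
 */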
3466 tcp_mss(struct tcpcb *tp, int offer)
3472 struct hc_metrics_lite metrics;
3475 KASSERT(tp != NULL, ("%s: tp == NULL", __func__));
3477 tcp_mss_update(tp, offer, &metrics, &mtuflags);
3483 * If there's a pipesize, change the socket buffer to that size,
3484 * don't change if sb_hiwat is different from the default (then it
3485 * has been changed on purpose with setsockopt).
3486 * Make the socket buffers an integral number of mss units;
3487 * if the mss is larger than the socket buffer, decrease the mss.
3489 so = inp->inp_socket;
3490 SOCKBUF_LOCK(&so->so_snd);
3491 if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe)
3492 bufsize = metrics.rmx_sendpipe;
3493 else
3494 bufsize = so->so_snd.sb_hiwat;
3495 if (bufsize < mss)
3496 mss = bufsize;
3497 else {
3498 bufsize = roundup(bufsize, mss);
3499 if (bufsize > sb_max)
3500 bufsize = sb_max;
3501 if (bufsize > so->so_snd.sb_hiwat)
3502 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
3504 SOCKBUF_UNLOCK(&so->so_snd);
3507 SOCKBUF_LOCK(&so->so_rcv);
3508 if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe)
3509 bufsize = metrics.rmx_recvpipe;
3510 else
3511 bufsize = so->so_rcv.sb_hiwat;
3512 if (bufsize > mss) {
3513 bufsize = roundup(bufsize, mss);
3514 if (bufsize > sb_max)
3515 bufsize = sb_max;
3516 if (bufsize > so->so_rcv.sb_hiwat)
3517 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
3519 SOCKBUF_UNLOCK(&so->so_rcv);
3521 /* Check the interface for TSO capabilities. */
3522 if (mtuflags & CSUM_TSO)
3523 tp->t_flags |= TF_TSO;
3527 * Determine the MSS option to send on an outgoing SYN.
3529 int
3530 tcp_mssopt(struct in_conninfo *inc)
3537 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
3540 if (inc->inc_flags & INC_ISIPV6) {
3541 mss = V_tcp_v6mssdflt;
3542 maxmtu = tcp_maxmtu6(inc, NULL);
3543 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3544 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3548 mss = V_tcp_mssdflt;
3549 maxmtu = tcp_maxmtu(inc, NULL);
3550 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3551 min_protoh = sizeof(struct tcpiphdr);
3553 if (maxmtu && thcmtu)
3554 mss = min(maxmtu, thcmtu) - min_protoh;
3555 else if (maxmtu || thcmtu)
3556 mss = max(maxmtu, thcmtu) - min_protoh;
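/*
 * Example: for IPv4 over Ethernet with no hostcache entry,
 * maxmtu = 1500, thcmtu = 0 and min_protoh = 40, so the advertised
 * MSS option is max(1500, 0) - 40 = 1460. A cached MTU, when
 * present, can only lower this via the min() above.
 */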
3563 * When a partial ack arrives, force the retransmission of the
3564 * next unacknowledged segment. Do not clear tp->t_dupacks.
3565 * By setting snd_nxt to th_ack, this forces retransmission timer to
3566 * be started again.
3569 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
3571 tcp_seq onxt = tp->snd_nxt;
3572 u_long ocwnd = tp->snd_cwnd;
3574 INP_WLOCK_ASSERT(tp->t_inpcb);
3576 tcp_timer_activate(tp, TT_REXMT, 0);
3578 tp->snd_nxt = th->th_ack;
3580 * Set snd_cwnd to one segment beyond acknowledged offset.
3581 * (tp->snd_una has not yet been updated when this function is called.)
3583 tp->snd_cwnd = tp->t_maxseg + BYTES_THIS_ACK(tp, th);
3584 tp->t_flags |= TF_ACKNOW;
3585 (void) tcp_output(tp);
3586 tp->snd_cwnd = ocwnd;
3587 if (SEQ_GT(onxt, tp->snd_nxt))
3590 * Partial window deflation. Relies on the fact that tp->snd_una
3591 * has not been updated yet.
3593 if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
3594 tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
3597 tp->snd_cwnd += tp->t_maxseg;
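/*
 * Worked example with illustrative numbers: cwnd = 20000,
 * t_maxseg = 1460 and a partial ACK covering 2920 bytes. One
 * forced retransmission goes out with a temporary cwnd of
 * 1460 + 2920, then cwnd is restored, deflated by the 2920 acked
 * bytes and re-inflated by one segment:
 * 20000 - 2920 + 1460 = 18540.
 */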