2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3 * The Regents of the University of California. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
33 #include "opt_compat.h"
35 #include "opt_inet6.h"
36 #include "opt_ipsec.h"
38 #include "opt_tcpdebug.h"
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/callout.h>
43 #include <sys/kernel.h>
44 #include <sys/sysctl.h>
45 #include <sys/malloc.h>
48 #include <sys/domain.h>
52 #include <sys/socket.h>
53 #include <sys/socketvar.h>
54 #include <sys/protosw.h>
55 #include <sys/random.h>
59 #include <net/route.h>
62 #include <netinet/in.h>
63 #include <netinet/in_systm.h>
64 #include <netinet/ip.h>
66 #include <netinet/ip6.h>
68 #include <netinet/in_pcb.h>
70 #include <netinet6/in6_pcb.h>
72 #include <netinet/in_var.h>
73 #include <netinet/ip_var.h>
75 #include <netinet6/ip6_var.h>
76 #include <netinet6/scope6_var.h>
77 #include <netinet6/nd6.h>
79 #include <netinet/ip_icmp.h>
80 #include <netinet/tcp.h>
81 #include <netinet/tcp_fsm.h>
82 #include <netinet/tcp_seq.h>
83 #include <netinet/tcp_timer.h>
84 #include <netinet/tcp_var.h>
85 #include <netinet/tcp_syncache.h>
87 #include <netinet6/tcp6_var.h>
89 #include <netinet/tcpip.h>
91 #include <netinet/tcp_debug.h>
93 #include <netinet6/ip6protosw.h>
96 #include <netipsec/ipsec.h>
97 #include <netipsec/xform.h>
99 #include <netipsec/ipsec6.h>
101 #include <netipsec/key.h>
104 #include <machine/in_cksum.h>
107 #include <security/mac/mac_framework.h>
109 int tcp_mssdflt = TCP_MSS;
110 SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
111 &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");
114 int tcp_v6mssdflt = TCP6_MSS;
115 SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
116 CTLFLAG_RW, &tcp_v6mssdflt, 0,
117 "Default TCP Maximum Segment Size for IPv6");
121 * Minimum MSS we accept and use. This prevents DoS attacks where
122 * we are forced to a ridiculously low MSS like 20 and send hundreds
123 * of packets instead of one. The effect scales with the available
124 * bandwidth and quickly saturates the CPU and network interface
125 * with packet generation and sending. Set to zero to disable MINMSS
126 * checking. This setting prevents us from sending too small packets.
128 int tcp_minmss = TCP_MINMSS;
129 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
130 &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");
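/*
 * Rough illustration of the attack described above (numbers are
 * illustrative, not taken from this file): with the MSS forced down to
 * 20 bytes, a single 1448-byte write becomes ~73 segments instead of 1,
 * each paying full IP/TCP header and per-packet processing costs.
 * Keeping the MSS at or above TCP_MINMSS bounds that amplification.
 */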
132 int tcp_do_rfc1323 = 1;
133 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
134 &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");
136 static int tcp_log_debug = 1;
137 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_debug, CTLFLAG_RW,
138 &tcp_log_debug, 0, "Log errors caused by incoming TCP segments");
140 static int tcp_tcbhashsize = 0;
141 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN,
142 &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
144 static int do_tcpdrain = 1;
145 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW,
147 "Enable tcp_drain routine for extra help when low on mbufs");
149 SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
150 &tcbinfo.ipi_count, 0, "Number of active PCBs");
152 static int icmp_may_rst = 1;
153 SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW,
155 "Certain ICMP unreachable messages may abort connections in SYN_SENT");
157 static int tcp_isn_reseed_interval = 0;
158 SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
159 &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
162 * TCP bandwidth limiting sysctls. Note that the default lower bound of
163 * 1024 exists only for debugging. A good production default would be
164 * something like 6100.
166 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, inflight, CTLFLAG_RW, 0,
167 "TCP inflight data limiting");
169 static int tcp_inflight_enable = 1;
170 SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, enable, CTLFLAG_RW,
171 &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");
173 static int tcp_inflight_debug = 0;
174 SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, debug, CTLFLAG_RW,
175 &tcp_inflight_debug, 0, "Debug TCP inflight calculations");
177 static int tcp_inflight_rttthresh;
178 SYSCTL_PROC(_net_inet_tcp_inflight, OID_AUTO, rttthresh, CTLTYPE_INT|CTLFLAG_RW,
179 &tcp_inflight_rttthresh, 0, sysctl_msec_to_ticks, "I",
180 "RTT threshold below which inflight will deactivate itself");
182 static int tcp_inflight_min = 6144;
183 SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, min, CTLFLAG_RW,
184 &tcp_inflight_min, 0, "Lower-bound for TCP inflight window");
186 static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
187 SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, max, CTLFLAG_RW,
188 &tcp_inflight_max, 0, "Upper-bound for TCP inflight window");
190 static int tcp_inflight_stab = 20;
191 SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, stab, CTLFLAG_RW,
192 &tcp_inflight_stab, 0, "Inflight Algorithm Stabilization 20 = 2 packets");
194 uma_zone_t sack_hole_zone;
196 static struct inpcb *tcp_notify(struct inpcb *, int);
197 static void tcp_isn_tick(void *);
200 * Target size of TCP PCB hash tables. Must be a power of two.
202 * Note that this can be overridden by the kernel environment
203 * variable net.inet.tcp.tcbhashsize
206 #define TCBHASHSIZE 512
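/*
 * Example of the override mentioned above (the value is hypothetical):
 * set the loader tunable in /boot/loader.conf, e.g.
 *
 *	net.inet.tcp.tcbhashsize="2048"
 *
 * tcp_init() warns and falls back to a safe default if the value is not
 * a power of 2.
 */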
211 * Callouts should be moved into struct tcp directly. They are currently
212 * separate because the tcpcb structure is exported to userland for sysctl
213 * parsing purposes, and userland does not know about callouts.
220 static uma_zone_t tcpcb_zone;
221 MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers");
222 struct callout isn_callout;
223 static struct mtx isn_mtx;
225 #define ISN_LOCK_INIT() mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF)
226 #define ISN_LOCK() mtx_lock(&isn_mtx)
227 #define ISN_UNLOCK() mtx_unlock(&isn_mtx)
230 * TCP initialization.
233 tcp_zone_change(void *tag)
236 uma_zone_set_max(tcbinfo.ipi_zone, maxsockets);
237 uma_zone_set_max(tcpcb_zone, maxsockets);
238 tcp_tw_zone_change();
242 tcp_inpcb_init(void *mem, int size, int flags)
244 struct inpcb *inp = mem;
246 INP_LOCK_INIT(inp, "inp", "tcpinp");
254 int hashsize = TCBHASHSIZE;
255 tcp_delacktime = TCPTV_DELACK;
256 tcp_keepinit = TCPTV_KEEP_INIT;
257 tcp_keepidle = TCPTV_KEEP_IDLE;
258 tcp_keepintvl = TCPTV_KEEPINTVL;
259 tcp_maxpersistidle = TCPTV_KEEP_IDLE;
261 tcp_rexmit_min = TCPTV_MIN;
262 if (tcp_rexmit_min < 1)
264 tcp_rexmit_slop = TCPTV_CPU_VAR;
265 tcp_inflight_rttthresh = TCPTV_INFLIGHT_RTTTHRESH;
266 tcp_finwait2_timeout = TCPTV_FINWAIT2_TIMEOUT;
268 INP_INFO_LOCK_INIT(&tcbinfo, "tcp");
270 tcbinfo.ipi_listhead = &tcb;
271 TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
272 if (!powerof2(hashsize)) {
273 printf("WARNING: TCB hash size not a power of 2\n");
274 hashsize = 512; /* safe default */
276 tcp_tcbhashsize = hashsize;
277 tcbinfo.ipi_hashbase = hashinit(hashsize, M_PCB,
278 &tcbinfo.ipi_hashmask);
279 tcbinfo.ipi_porthashbase = hashinit(hashsize, M_PCB,
280 &tcbinfo.ipi_porthashmask);
281 tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
282 NULL, NULL, tcp_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
283 uma_zone_set_max(tcbinfo.ipi_zone, maxsockets);
285 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
287 #define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
289 if (max_protohdr < TCP_MINPROTOHDR)
290 max_protohdr = TCP_MINPROTOHDR;
291 if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
293 #undef TCP_MINPROTOHDR
295 * These have to be type stable for the benefit of the timers.
297 tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
298 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
299 uma_zone_set_max(tcpcb_zone, maxsockets);
305 callout_init(&isn_callout, CALLOUT_MPSAFE);
307 EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
308 SHUTDOWN_PRI_DEFAULT);
309 sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
310 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
311 EVENTHANDLER_REGISTER(maxsockets_change, tcp_zone_change, NULL,
312 EVENTHANDLER_PRI_ANY);
319 callout_stop(&isn_callout);
323 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
324 * tcp_template used to store this data in mbufs, but we now recopy it out
325 * of the tcpcb each time to conserve mbufs.
328 tcpip_fillheaders(struct inpcb *inp, void *ip_ptr, void *tcp_ptr)
330 struct tcphdr *th = (struct tcphdr *)tcp_ptr;
332 INP_LOCK_ASSERT(inp);
335 if ((inp->inp_vflag & INP_IPV6) != 0) {
338 ip6 = (struct ip6_hdr *)ip_ptr;
339 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
340 (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
341 ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
342 (IPV6_VERSION & IPV6_VERSION_MASK);
343 ip6->ip6_nxt = IPPROTO_TCP;
344 ip6->ip6_plen = sizeof(struct tcphdr);
345 ip6->ip6_src = inp->in6p_laddr;
346 ip6->ip6_dst = inp->in6p_faddr;
352 ip = (struct ip *)ip_ptr;
353 ip->ip_v = IPVERSION;
355 ip->ip_tos = inp->inp_ip_tos;
359 ip->ip_ttl = inp->inp_ip_ttl;
361 ip->ip_p = IPPROTO_TCP;
362 ip->ip_src = inp->inp_laddr;
363 ip->ip_dst = inp->inp_faddr;
365 th->th_sport = inp->inp_lport;
366 th->th_dport = inp->inp_fport;
374 th->th_sum = 0; /* in_pseudo() is called later for ipv4 */
378 * Create template to be used to send tcp packets on a connection.
379 * Allocates an mbuf and fills in a skeletal tcp/ip header. The only
380 * use for this function is in keepalives, which use tcp_respond.
383 tcpip_maketemplate(struct inpcb *inp)
388 m = m_get(M_DONTWAIT, MT_DATA);
391 m->m_len = sizeof(struct tcptemp);
392 n = mtod(m, struct tcptemp *);
394 tcpip_fillheaders(inp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
399 * Send a single message to the TCP at address specified by
400 * the given TCP/IP header. If m == NULL, then we make a copy
401 * of the tcpiphdr at ti and send directly to the addressed host.
402 * This is used to force keep alive messages out using the TCP
403 * template for a connection. If flags are given then we send
404 * a message back to the TCP which originated the segment ti,
405 * and discard the mbuf containing it and any other attached mbufs.
407 * In any case the ack and sequence number of the transmitted
408 * segment are as specified by the parameters.
410 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
413 tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
414 tcp_seq ack, tcp_seq seq, int flags)
427 KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));
430 isipv6 = ((struct ip *)ipgen)->ip_v == 6;
437 KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
438 INP_LOCK_ASSERT(inp);
443 if (!(flags & TH_RST)) {
444 win = sbspace(&inp->inp_socket->so_rcv);
445 if (win > (long)TCP_MAXWIN << tp->rcv_scale)
446 win = (long)TCP_MAXWIN << tp->rcv_scale;
450 m = m_gethdr(M_DONTWAIT, MT_DATA);
454 m->m_data += max_linkhdr;
457 bcopy((caddr_t)ip6, mtod(m, caddr_t),
458 sizeof(struct ip6_hdr));
459 ip6 = mtod(m, struct ip6_hdr *);
460 nth = (struct tcphdr *)(ip6 + 1);
464 bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
465 ip = mtod(m, struct ip *);
466 nth = (struct tcphdr *)(ip + 1);
468 bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
473 m->m_data = (caddr_t)ipgen;
474 /* m_len is set later */
476 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
479 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
480 nth = (struct tcphdr *)(ip6 + 1);
484 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
485 nth = (struct tcphdr *)(ip + 1);
489 * this is usually a case when an extension header
490 * exists between the IPv6 header and the
493 nth->th_sport = th->th_sport;
494 nth->th_dport = th->th_dport;
496 xchg(nth->th_dport, nth->th_sport, n_short);
502 ip6->ip6_vfc = IPV6_VERSION;
503 ip6->ip6_nxt = IPPROTO_TCP;
504 ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
506 tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
510 tlen += sizeof (struct tcpiphdr);
512 ip->ip_ttl = ip_defttl;
513 if (path_mtu_discovery)
517 m->m_pkthdr.len = tlen;
518 m->m_pkthdr.rcvif = NULL;
522 * Packet is associated with a socket, so allow the
523 * label of the response to reflect the socket label.
525 INP_LOCK_ASSERT(inp);
526 mac_create_mbuf_from_inpcb(inp, m);
529 * Packet is not associated with a socket, so possibly
530 * update the label in place.
532 mac_reflect_mbuf_tcp(m);
535 nth->th_seq = htonl(seq);
536 nth->th_ack = htonl(ack);
538 nth->th_off = sizeof (struct tcphdr) >> 2;
539 nth->th_flags = flags;
541 nth->th_win = htons((u_short) (win >> tp->rcv_scale));
543 nth->th_win = htons((u_short)win);
548 nth->th_sum = in6_cksum(m, IPPROTO_TCP,
549 sizeof(struct ip6_hdr),
550 tlen - sizeof(struct ip6_hdr));
551 ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb :
556 nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
557 htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
558 m->m_pkthdr.csum_flags = CSUM_TCP;
559 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
562 if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
563 tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
567 (void) ip6_output(m, NULL, NULL, ipflags, NULL, NULL, inp);
570 (void) ip_output(m, NULL, NULL, ipflags, NULL, inp);
574 * Create a new TCP control block, making an
575 * empty reassembly queue and hooking it to the argument
576 * protocol control block. The `inp' parameter must have
577 * come from the zone allocator set up in tcp_init().
580 tcp_newtcpcb(struct inpcb *inp)
582 struct tcpcb_mem *tm;
585 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
588 tm = uma_zalloc(tcpcb_zone, M_NOWAIT | M_ZERO);
592 tp->t_timers = &tm->tt;
593 /* LIST_INIT(&tp->t_segq); */ /* XXX covered by M_ZERO */
594 tp->t_maxseg = tp->t_maxopd =
596 isipv6 ? tcp_v6mssdflt :
600 /* Set up our timeouts. */
601 callout_init_mtx(&tp->t_timers->tt_timer, &inp->inp_mtx,
602 CALLOUT_RETURNUNLOCKED);
605 tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
607 tp->t_flags |= TF_SACK_PERMIT;
608 TAILQ_INIT(&tp->snd_holes);
609 tp->t_inpcb = inp; /* XXX */
611 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
612 * rtt estimate. Set rttvar so that srtt + 4 * rttvar gives
613 * a reasonable initial retransmit time.
615 tp->t_srtt = TCPTV_SRTTBASE;
616 tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
617 tp->t_rttmin = tcp_rexmit_min;
618 tp->t_rxtcur = TCPTV_RTOBASE;
619 tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
620 tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
621 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
622 tp->t_rcvtime = ticks;
623 tp->t_bw_rtttime = ticks;
625 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
626 * because the socket may be bound to an IPv6 wildcard address,
627 * which may match an IPv4-mapped IPv6 address.
629 inp->inp_ip_ttl = ip_defttl;
631 return (tp); /* XXX */
635 * Drop a TCP connection, reporting
636 * the specified error. If the connection is synchronized,
637 * then send a RST to the peer.
640 tcp_drop(struct tcpcb *tp, int errno)
642 struct socket *so = tp->t_inpcb->inp_socket;
644 INP_INFO_WLOCK_ASSERT(&tcbinfo);
645 INP_LOCK_ASSERT(tp->t_inpcb);
647 if (TCPS_HAVERCVDSYN(tp->t_state)) {
648 tp->t_state = TCPS_CLOSED;
649 (void) tcp_output(tp);
650 tcpstat.tcps_drops++;
652 tcpstat.tcps_conndrops++;
653 if (errno == ETIMEDOUT && tp->t_softerror)
654 errno = tp->t_softerror;
655 so->so_error = errno;
656 return (tcp_close(tp));
660 tcp_discardcb(struct tcpcb *tp)
663 struct inpcb *inp = tp->t_inpcb;
664 struct socket *so = inp->inp_socket;
666 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
669 INP_LOCK_ASSERT(inp);
672 * Make sure that all of our timers are stopped before we
675 * XXX: callout_stop() may race and a callout may already
676 * try to obtain the INP_LOCK. Only callout_drain() would
677 * stop this, but it would cause a LOR, so we can't use it.
678 * The tcp_timer() function contains a lot of checks to
679 * handle this case rather gracefully.
681 tp->t_timers->tt_active = 0;
682 callout_stop(&tp->t_timers->tt_timer);
685 * If we got enough samples through the srtt filter,
686 * save the rtt and rttvar in the routing entry.
687 * 'Enough' is arbitrarily defined as 4 rtt samples.
688 * 4 samples is enough for the srtt filter to converge
690 * to within a reasonable percentage of the correct value; fewer samples
690 * and we could save a bogus rtt. The danger is not high
691 * as tcp quickly recovers from everything.
692 * XXX: Works very well but needs some more statistics!
694 if (tp->t_rttupdated >= 4) {
695 struct hc_metrics_lite metrics;
698 bzero(&metrics, sizeof(metrics));
700 * Always update the ssthresh when the conditions below
701 * are satisfied. This gives us a better starting value
702 * for congestion avoidance on new connections.
703 * ssthresh is only set if packet loss occurred on a session.
705 * XXXRW: 'so' may be NULL here, and/or socket buffer may be
706 * being torn down. Ideally this code would not use 'so'.
708 ssthresh = tp->snd_ssthresh;
709 if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
711 * convert the limit from user data bytes to
712 * packets then to packet data bytes.
714 ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
717 ssthresh *= (u_long)(tp->t_maxseg +
719 (isipv6 ? sizeof (struct ip6_hdr) +
720 sizeof (struct tcphdr) :
722 sizeof (struct tcpiphdr)
729 metrics.rmx_ssthresh = ssthresh;
731 metrics.rmx_rtt = tp->t_srtt;
732 metrics.rmx_rttvar = tp->t_rttvar;
733 /* XXX: This wraps if the pipe is more than 4 Gbit per second */
734 metrics.rmx_bandwidth = tp->snd_bandwidth;
735 metrics.rmx_cwnd = tp->snd_cwnd;
736 metrics.rmx_sendpipe = 0;
737 metrics.rmx_recvpipe = 0;
739 tcp_hc_update(&inp->inp_inc, &metrics);
742 /* free the reassembly queue, if any */
743 while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
744 LIST_REMOVE(q, tqe_q);
746 uma_zfree(tcp_reass_zone, q);
750 tcp_free_sackholes(tp);
751 inp->inp_ppcb = NULL;
753 uma_zfree(tcpcb_zone, tp);
757 * Attempt to close a TCP control block, marking it as dropped, and freeing
758 * the socket if we hold the only reference.
761 tcp_close(struct tcpcb *tp)
763 struct inpcb *inp = tp->t_inpcb;
766 INP_INFO_WLOCK_ASSERT(&tcbinfo);
767 INP_LOCK_ASSERT(inp);
770 tcpstat.tcps_closed++;
771 KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL"));
772 so = inp->inp_socket;
773 soisdisconnected(so);
774 if (inp->inp_vflag & INP_SOCKREF) {
775 KASSERT(so->so_state & SS_PROTOREF,
776 ("tcp_close: !SS_PROTOREF"));
777 inp->inp_vflag &= ~INP_SOCKREF;
781 so->so_state &= ~SS_PROTOREF;
795 struct tseg_qent *te;
798 * Walk the tcpcbs, if any exist, and flush the reassembly queue,
800 * XXX: The "Net/3" implementation doesn't imply that the TCP
801 * reassembly queue should be flushed, but in a situation
802 * where we're really low on mbufs, this is potentially
805 INP_INFO_RLOCK(&tcbinfo);
806 LIST_FOREACH(inpb, tcbinfo.ipi_listhead, inp_list) {
807 if (inpb->inp_vflag & INP_TIMEWAIT)
810 if ((tcpb = intotcpcb(inpb)) != NULL) {
811 while ((te = LIST_FIRST(&tcpb->t_segq))
813 LIST_REMOVE(te, tqe_q);
815 uma_zfree(tcp_reass_zone, te);
819 tcp_clean_sackreport(tcpb);
823 INP_INFO_RUNLOCK(&tcbinfo);
828 * Notify a tcp user of an asynchronous error;
829 * store error as soft error, but wake up user
830 * (for now, won't do anything until we can select for soft error).
832 * Do not wake up user since there currently is no mechanism for
833 * reporting soft errors (yet - a kqueue filter may be added).
835 static struct inpcb *
836 tcp_notify(struct inpcb *inp, int error)
840 INP_INFO_WLOCK_ASSERT(&tcbinfo);
841 INP_LOCK_ASSERT(inp);
843 if ((inp->inp_vflag & INP_TIMEWAIT) ||
844 (inp->inp_vflag & INP_DROPPED))
848 KASSERT(tp != NULL, ("tcp_notify: tp == NULL"));
851 * Ignore some errors if we are hooked up.
852 * If connection hasn't completed, has retransmitted several times,
853 * and receives a second error, give up now. This is better
854 * than waiting a long time to establish a connection that
855 * can never complete.
857 if (tp->t_state == TCPS_ESTABLISHED &&
858 (error == EHOSTUNREACH || error == ENETUNREACH ||
859 error == EHOSTDOWN)) {
861 } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
863 tp = tcp_drop(tp, error);
869 tp->t_softerror = error;
873 wakeup( &so->so_timeo);
880 tcp_pcblist(SYSCTL_HANDLER_ARGS)
882 int error, i, m, n, pcb_count;
883 struct inpcb *inp, **inp_list;
888 * The process of preparing the TCB list is too time-consuming and
889 * resource-intensive to repeat twice on every request.
891 if (req->oldptr == NULL) {
892 m = syncache_pcbcount();
893 n = tcbinfo.ipi_count;
894 req->oldidx = 2 * (sizeof xig)
895 + ((m + n) + n/8) * sizeof(struct xtcpcb);
899 if (req->newptr != NULL)
903 * OK, now we're committed to doing something.
905 INP_INFO_RLOCK(&tcbinfo);
906 gencnt = tcbinfo.ipi_gencnt;
907 n = tcbinfo.ipi_count;
908 INP_INFO_RUNLOCK(&tcbinfo);
910 m = syncache_pcbcount();
912 error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
913 + (n + m) * sizeof(struct xtcpcb));
917 xig.xig_len = sizeof xig;
918 xig.xig_count = n + m;
919 xig.xig_gen = gencnt;
920 xig.xig_sogen = so_gencnt;
921 error = SYSCTL_OUT(req, &xig, sizeof xig);
925 error = syncache_pcblist(req, m, &pcb_count);
929 inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
930 if (inp_list == NULL)
933 INP_INFO_RLOCK(&tcbinfo);
934 for (inp = LIST_FIRST(tcbinfo.ipi_listhead), i = 0; inp != NULL && i
935 < n; inp = LIST_NEXT(inp, inp_list)) {
937 if (inp->inp_gencnt <= gencnt) {
939 * XXX: This use of cr_cansee(), introduced with
940 * TCP state changes, is not quite right, but for
941 * now, better than nothing.
943 if (inp->inp_vflag & INP_TIMEWAIT) {
944 if (intotw(inp) != NULL)
945 error = cr_cansee(req->td->td_ucred,
946 intotw(inp)->tw_cred);
948 error = EINVAL; /* Skip this inp. */
950 error = cr_canseesocket(req->td->td_ucred,
957 INP_INFO_RUNLOCK(&tcbinfo);
961 for (i = 0; i < n; i++) {
964 if (inp->inp_gencnt <= gencnt) {
968 bzero(&xt, sizeof(xt));
969 xt.xt_len = sizeof xt;
970 /* XXX should avoid extra copy */
971 bcopy(inp, &xt.xt_inp, sizeof *inp);
972 inp_ppcb = inp->inp_ppcb;
973 if (inp_ppcb == NULL)
974 bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
975 else if (inp->inp_vflag & INP_TIMEWAIT) {
976 bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
977 xt.xt_tp.t_state = TCPS_TIME_WAIT;
979 bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
980 if (inp->inp_socket != NULL)
981 sotoxsocket(inp->inp_socket, &xt.xt_socket);
983 bzero(&xt.xt_socket, sizeof xt.xt_socket);
984 xt.xt_socket.xso_protocol = IPPROTO_TCP;
986 xt.xt_inp.inp_gencnt = inp->inp_gencnt;
988 error = SYSCTL_OUT(req, &xt, sizeof xt);
995 * Give the user an updated idea of our state.
996 * If the generation differs from what we told
997 * her before, she knows that something happened
998 * while we were processing this request, and it
999 * might be necessary to retry.
1001 INP_INFO_RLOCK(&tcbinfo);
1002 xig.xig_gen = tcbinfo.ipi_gencnt;
1003 xig.xig_sogen = so_gencnt;
1004 xig.xig_count = tcbinfo.ipi_count + pcb_count;
1005 INP_INFO_RUNLOCK(&tcbinfo);
1006 error = SYSCTL_OUT(req, &xig, sizeof xig);
1008 free(inp_list, M_TEMP);
1012 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
1013 tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
1016 tcp_getcred(SYSCTL_HANDLER_ARGS)
1019 struct sockaddr_in addrs[2];
1023 error = priv_check(req->td, PRIV_NETINET_GETCRED);
1026 error = SYSCTL_IN(req, addrs, sizeof(addrs));
1029 INP_INFO_RLOCK(&tcbinfo);
1030 inp = in_pcblookup_hash(&tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
1031 addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
1037 if (inp->inp_socket == NULL) {
1041 error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
1044 cru2x(inp->inp_socket->so_cred, &xuc);
1048 INP_INFO_RUNLOCK(&tcbinfo);
1050 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
1054 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
1055 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
1056 tcp_getcred, "S,xucred", "Get the xucred of a TCP connection");
1060 tcp6_getcred(SYSCTL_HANDLER_ARGS)
1063 struct sockaddr_in6 addrs[2];
1065 int error, mapped = 0;
1067 error = priv_check(req->td, PRIV_NETINET_GETCRED);
1070 error = SYSCTL_IN(req, addrs, sizeof(addrs));
1073 if ((error = sa6_embedscope(&addrs[0], ip6_use_defzone)) != 0 ||
1074 (error = sa6_embedscope(&addrs[1], ip6_use_defzone)) != 0) {
1077 if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
1078 if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
1084 INP_INFO_RLOCK(&tcbinfo);
1086 inp = in_pcblookup_hash(&tcbinfo,
1087 *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
1089 *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
1093 inp = in6_pcblookup_hash(&tcbinfo,
1094 &addrs[1].sin6_addr, addrs[1].sin6_port,
1095 &addrs[0].sin6_addr, addrs[0].sin6_port, 0, NULL);
1101 if (inp->inp_socket == NULL) {
1105 error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
1108 cru2x(inp->inp_socket->so_cred, &xuc);
1112 INP_INFO_RUNLOCK(&tcbinfo);
1114 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
1118 SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
1119 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
1120 tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection");
1125 tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
1127 struct ip *ip = vip;
1129 struct in_addr faddr;
1132 struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
1134 struct in_conninfo inc;
1135 tcp_seq icmp_tcp_seq;
1138 faddr = ((struct sockaddr_in *)sa)->sin_addr;
1139 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
1142 if (cmd == PRC_MSGSIZE)
1143 notify = tcp_mtudisc;
1144 else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
1145 cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
1146 notify = tcp_drop_syn_sent;
1148 * Redirects don't need to be handled up here.
1150 else if (PRC_IS_REDIRECT(cmd))
1153 * Source quench is deprecated.
1155 else if (cmd == PRC_QUENCH)
1158 * Hostdead is ugly because it goes linearly through all PCBs.
1159 * XXX: We never get this from ICMP, otherwise it would make an
1160 * excellent DoS attack on machines with many connections.
1162 else if (cmd == PRC_HOSTDEAD)
1164 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
1167 icp = (struct icmp *)((caddr_t)ip
1168 - offsetof(struct icmp, icmp_ip));
1169 th = (struct tcphdr *)((caddr_t)ip
1170 + (ip->ip_hl << 2));
1171 INP_INFO_WLOCK(&tcbinfo);
1172 inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
1173 ip->ip_src, th->th_sport, 0, NULL);
1176 if (!(inp->inp_vflag & INP_TIMEWAIT) &&
1177 !(inp->inp_vflag & INP_DROPPED) &&
1178 !(inp->inp_socket == NULL)) {
1179 icmp_tcp_seq = htonl(th->th_seq);
1180 tp = intotcpcb(inp);
1181 if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
1182 SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
1183 if (cmd == PRC_MSGSIZE) {
1186 * If we got a needfrag, set the MTU
1187 * in the route to the suggested new
1188 * value (if given) and then notify.
1190 bzero(&inc, sizeof(inc));
1191 inc.inc_flags = 0; /* IPv4 */
1192 inc.inc_faddr = faddr;
1194 mtu = ntohs(icp->icmp_nextmtu);
1196 * If no alternative MTU was
1197 * proposed, try the next smaller
1198 * one. ip->ip_len has already
1199 * been swapped in icmp_input().
1202 mtu = ip_next_mtu(ip->ip_len,
1204 if (mtu < max(296, (tcp_minmss)
1205 + sizeof(struct tcpiphdr)))
1209 + sizeof(struct tcpiphdr);
1211 * Only cache the MTU if it
1212 * is smaller than the interface
1213 * or route MTU. tcp_mtudisc()
1214 * will do the right thing by itself.
1216 if (mtu <= tcp_maxmtu(&inc, NULL))
1217 tcp_hc_updatemtu(&inc, mtu);
1220 inp = (*notify)(inp, inetctlerrmap[cmd]);
1226 inc.inc_fport = th->th_dport;
1227 inc.inc_lport = th->th_sport;
1228 inc.inc_faddr = faddr;
1229 inc.inc_laddr = ip->ip_src;
1233 syncache_unreach(&inc, th);
1235 INP_INFO_WUNLOCK(&tcbinfo);
1237 in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
1242 tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
1245 struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
1246 struct ip6_hdr *ip6;
1248 struct ip6ctlparam *ip6cp = NULL;
1249 const struct sockaddr_in6 *sa6_src = NULL;
1251 struct tcp_portonly {
1256 if (sa->sa_family != AF_INET6 ||
1257 sa->sa_len != sizeof(struct sockaddr_in6))
1260 if (cmd == PRC_MSGSIZE)
1261 notify = tcp_mtudisc;
1262 else if (!PRC_IS_REDIRECT(cmd) &&
1263 ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
1265 /* Source quench is deprecated. */
1266 else if (cmd == PRC_QUENCH)
1269 /* if the parameter is from icmp6, decode it. */
1271 ip6cp = (struct ip6ctlparam *)d;
1273 ip6 = ip6cp->ip6c_ip6;
1274 off = ip6cp->ip6c_off;
1275 sa6_src = ip6cp->ip6c_src;
1279 off = 0; /* fool gcc */
1284 struct in_conninfo inc;
1286 * XXX: We assume that when IPV6 is non-NULL,
1287 * M and OFF are valid.
1290 /* check if we can safely examine src and dst ports */
1291 if (m->m_pkthdr.len < off + sizeof(*thp))
1294 bzero(&th, sizeof(th));
1295 m_copydata(m, off, sizeof(*thp), (caddr_t)&th);
1297 in6_pcbnotify(&tcbinfo, sa, th.th_dport,
1298 (struct sockaddr *)ip6cp->ip6c_src,
1299 th.th_sport, cmd, NULL, notify);
1301 inc.inc_fport = th.th_dport;
1302 inc.inc_lport = th.th_sport;
1303 inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
1304 inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
1306 INP_INFO_WLOCK(&tcbinfo);
1307 syncache_unreach(&inc, &th);
1308 INP_INFO_WUNLOCK(&tcbinfo);
1310 in6_pcbnotify(&tcbinfo, sa, 0, (const struct sockaddr *)sa6_src,
1311 0, cmd, NULL, notify);
1317 * Following is where TCP initial sequence number generation occurs.
1319 * There are two places where we must use initial sequence numbers:
1320 * 1. In SYN-ACK packets.
1321 * 2. In SYN packets.
1323 * All ISNs for SYN-ACK packets are generated by the syncache. See
1324 * tcp_syncache.c for details.
1326 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
1327 * depends on this property. In addition, these ISNs should be
1328 * unguessable so as to prevent connection hijacking. To satisfy
1329 * the requirements of this situation, the algorithm outlined in
1330 * RFC 1948 is used, with only small modifications.
1332 * Implementation details:
1334 * Time is based on the system timer, and is corrected so that it
1335 * increases by one megabyte per second. This allows for proper
1336 * recycling on high speed LANs while still leaving over an hour
1339 * As reading the *exact* system time is too expensive to be done
1340 * whenever setting up a TCP connection, we increment the time
1341 * offset in two ways. First, a small random positive increment
1342 * is added to isn_offset for each connection that is set up.
1343 * Second, the function tcp_isn_tick fires once per clock tick
1344 * and increments isn_offset as necessary so that sequence numbers
1345 * are incremented at approximately ISN_BYTES_PER_SECOND. The
1346 * random positive increments serve only to ensure that the same
1347 * exact sequence number is never sent out twice (as could otherwise
1348 * happen when a port is recycled in less than the system tick
1351 * net.inet.tcp.isn_reseed_interval controls the number of seconds
1352 * between seeding of isn_secret. This is normally set to zero,
1353 * as reseeding should not be necessary.
1355 * Locking of the global variables isn_secret, isn_last_reseed, isn_offset,
1356 * isn_offset_old, and isn_ctx is performed using the ISN lock (isn_mtx)
1357 * defined above, held exclusively while an ISN is generated.
1360 #define ISN_BYTES_PER_SECOND 1048576
1361 #define ISN_STATIC_INCREMENT 4096
1362 #define ISN_RANDOM_INCREMENT (4096 - 1)
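/*
 * Sketch of the computation below (see tcp_new_isn() and tcp_isn_tick()):
 *
 *	isn_offset += ISN_STATIC_INCREMENT + (arc4random() & ISN_RANDOM_INCREMENT);
 *	new_isn = first 32 bits of MD5(ports, addresses, isn_secret) + isn_offset;
 *
 * and, once per tick, isn_offset is pulled forward so that it advances by
 * roughly ISN_BYTES_PER_SECOND per second overall.
 */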
1364 static u_char isn_secret[32];
1365 static int isn_last_reseed;
1366 static u_int32_t isn_offset, isn_offset_old;
1367 static MD5_CTX isn_ctx;
1370 tcp_new_isn(struct tcpcb *tp)
1372 u_int32_t md5_buffer[4];
1375 INP_LOCK_ASSERT(tp->t_inpcb);
1378 /* Seed if this is the first use, reseed if requested. */
1379 if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
1380 (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
1382 read_random(&isn_secret, sizeof(isn_secret));
1383 isn_last_reseed = ticks;
1386 /* Compute the md5 hash and return the ISN. */
1388 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
1389 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
1391 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
1392 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
1393 sizeof(struct in6_addr));
1394 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
1395 sizeof(struct in6_addr));
1399 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
1400 sizeof(struct in_addr));
1401 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
1402 sizeof(struct in_addr));
1404 MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
1405 MD5Final((u_char *) &md5_buffer, &isn_ctx);
1406 new_isn = (tcp_seq) md5_buffer[0];
1407 isn_offset += ISN_STATIC_INCREMENT +
1408 (arc4random() & ISN_RANDOM_INCREMENT);
1409 new_isn += isn_offset;
1415 * Increment the offset to the next ISN_BYTES_PER_SECOND / 100 boundary
1416 * to keep time flowing at a relatively constant rate. If the random
1417 * increments have already pushed us past the projected offset, do nothing.
1420 tcp_isn_tick(void *xtp)
1422 u_int32_t projected_offset;
1425 projected_offset = isn_offset_old + ISN_BYTES_PER_SECOND / 100;
1427 if (SEQ_GT(projected_offset, isn_offset))
1428 isn_offset = projected_offset;
1430 isn_offset_old = isn_offset;
1431 callout_reset(&isn_callout, hz/100, tcp_isn_tick, NULL);
1436 * When a specific ICMP unreachable message is received and the
1437 * connection state is SYN-SENT, drop the connection. This behavior
1438 * is controlled by the icmp_may_rst sysctl.
1441 tcp_drop_syn_sent(struct inpcb *inp, int errno)
1445 INP_INFO_WLOCK_ASSERT(&tcbinfo);
1446 INP_LOCK_ASSERT(inp);
1448 if ((inp->inp_vflag & INP_TIMEWAIT) ||
1449 (inp->inp_vflag & INP_DROPPED))
1452 tp = intotcpcb(inp);
1453 if (tp->t_state != TCPS_SYN_SENT)
1456 tp = tcp_drop(tp, errno);
1464 * When `need fragmentation' ICMP is received, update our idea of the MSS
1465 * based on the new value in the route. Also nudge TCP to send something,
1466 * since we know the packet we just sent was dropped.
1467 * This duplicates some code in the tcp_mss() function in tcp_input.c.
1470 tcp_mtudisc(struct inpcb *inp, int errno)
1473 struct socket *so = inp->inp_socket;
1481 INP_LOCK_ASSERT(inp);
1482 if ((inp->inp_vflag & INP_TIMEWAIT) ||
1483 (inp->inp_vflag & INP_DROPPED))
1486 tp = intotcpcb(inp);
1487 KASSERT(tp != NULL, ("tcp_mtudisc: tp == NULL"));
1490 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
1492 maxmtu = tcp_hc_getmtu(&inp->inp_inc); /* IPv4 and IPv6 */
1495 isipv6 ? tcp_maxmtu6(&inp->inp_inc, NULL) :
1497 tcp_maxmtu(&inp->inp_inc, NULL);
1501 maxmtu = min(maxmtu, romtu);
1503 tp->t_maxopd = tp->t_maxseg =
1505 isipv6 ? tcp_v6mssdflt :
1512 (isipv6 ? sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
1514 sizeof(struct tcpiphdr)
1521 * XXX - The above conditional probably violates the TCP
1522 * spec. The problem is that, since we don't know the
1523 * other end's MSS, we are supposed to use a conservative
1524 * default. But, if we do that, then MTU discovery will
1525 * never actually take place, because the conservative
1526 * default is much less than the MTUs typically seen
1527 * on the Internet today. For the moment, we'll sweep
1528 * this under the carpet.
1530 * The conservative default might not actually be a problem
1531 * if the only case this occurs is when sending an initial
1532 * SYN with options and data to a host we've never talked
1533 * to before. Then, they will reply with an MSS value which
1534 * will get recorded and the new parameters should get
1535 * recomputed. For Further Study.
1537 if (tp->t_maxopd <= mss)
1541 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
1542 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
1543 mss -= TCPOLEN_TSTAMP_APPA;
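/*
 * TCPOLEN_TSTAMP_APPA is the timestamp option plus padding (normally
 * 12 bytes: the 10-byte option padded with two NOPs), so negotiating
 * RFC 1323 timestamps costs that much payload per segment.
 */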
1544 #if (MCLBYTES & (MCLBYTES - 1)) == 0
1546 mss &= ~(MCLBYTES-1);
1549 mss = mss / MCLBYTES * MCLBYTES;
1551 if (so->so_snd.sb_hiwat < mss)
1552 mss = so->so_snd.sb_hiwat;
1556 tcpstat.tcps_mturesent++;
1558 tp->snd_nxt = tp->snd_una;
1559 tcp_free_sackholes(tp);
1560 tp->snd_recover = tp->snd_max;
1561 if (tp->t_flags & TF_SACK_PERMIT)
1562 EXIT_FASTRECOVERY(tp);
1568 * Look up the routing entry to the peer of this inpcb. If no route
1569 * is found and it cannot be allocated, then return NULL. This routine
1570 * is called by TCP routines that access the rmx structure and by tcp_mss
1571 * to get the interface MTU.
1574 tcp_maxmtu(struct in_conninfo *inc, int *flags)
1577 struct sockaddr_in *dst;
1581 KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));
1583 bzero(&sro, sizeof(sro));
1584 if (inc->inc_faddr.s_addr != INADDR_ANY) {
1585 dst = (struct sockaddr_in *)&sro.ro_dst;
1586 dst->sin_family = AF_INET;
1587 dst->sin_len = sizeof(*dst);
1588 dst->sin_addr = inc->inc_faddr;
1589 rtalloc_ign(&sro, RTF_CLONING);
1591 if (sro.ro_rt != NULL) {
1592 ifp = sro.ro_rt->rt_ifp;
1593 if (sro.ro_rt->rt_rmx.rmx_mtu == 0)
1594 maxmtu = ifp->if_mtu;
1596 maxmtu = min(sro.ro_rt->rt_rmx.rmx_mtu, ifp->if_mtu);
1598 /* Report additional interface capabilities. */
1599 if (flags != NULL) {
1600 if (ifp->if_capenable & IFCAP_TSO4 &&
1601 ifp->if_hwassist & CSUM_TSO)
1611 tcp_maxmtu6(struct in_conninfo *inc, int *flags)
1613 struct route_in6 sro6;
1617 KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));
1619 bzero(&sro6, sizeof(sro6));
1620 if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
1621 sro6.ro_dst.sin6_family = AF_INET6;
1622 sro6.ro_dst.sin6_len = sizeof(struct sockaddr_in6);
1623 sro6.ro_dst.sin6_addr = inc->inc6_faddr;
1624 rtalloc_ign((struct route *)&sro6, RTF_CLONING);
1626 if (sro6.ro_rt != NULL) {
1627 ifp = sro6.ro_rt->rt_ifp;
1628 if (sro6.ro_rt->rt_rmx.rmx_mtu == 0)
1629 maxmtu = IN6_LINKMTU(sro6.ro_rt->rt_ifp);
1631 maxmtu = min(sro6.ro_rt->rt_rmx.rmx_mtu,
1632 IN6_LINKMTU(sro6.ro_rt->rt_ifp));
1634 /* Report additional interface capabilities. */
1635 if (flags != NULL) {
1636 if (ifp->if_capenable & IFCAP_TSO6 &&
1637 ifp->if_hwassist & CSUM_TSO)
1648 /* compute ESP/AH header size for TCP, including outer IP header. */
1650 ipsec_hdrsiz_tcp(struct tcpcb *tp)
1657 struct ip6_hdr *ip6;
1661 if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
1663 MGETHDR(m, M_DONTWAIT, MT_DATA);
1668 if ((inp->inp_vflag & INP_IPV6) != 0) {
1669 ip6 = mtod(m, struct ip6_hdr *);
1670 th = (struct tcphdr *)(ip6 + 1);
1671 m->m_pkthdr.len = m->m_len =
1672 sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
1673 tcpip_fillheaders(inp, ip6, th);
1674 hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
1678 ip = mtod(m, struct ip *);
1679 th = (struct tcphdr *)(ip + 1);
1680 m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
1681 tcpip_fillheaders(inp, ip, th);
1682 hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
1691 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
1693 * This code attempts to calculate the bandwidth-delay product as a
1694 * means of determining the optimal window size to maximize bandwidth,
1695 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
1696 * routers. This code also does a fairly good job keeping RTTs in check
1697 * across slow links like modems. We implement an algorithm which is very
1698 * similar to (but not meant to be the same as) TCP/Vegas. The code operates
1699 * on the transmitter side of a TCP connection and so only affects the
1700 * transmit side of the connection.
1702 * BACKGROUND: TCP makes no provision for the management of buffer space
1703 * at the end points or at the intermediate routers and switches. A TCP
1704 * stream, whether using NewReno or not, will eventually buffer as
1705 * many packets as it is able and the only reason this typically works is
1706 * due to the fairly small default buffers made available for a connection
1707 * (typically 16K or 32K). As machines use larger windows and/or window
1708 * scaling it is now fairly easy for even a single TCP connection to blow-out
1709 * all available buffer space not only on the local interface, but on
1710 * intermediate routers and switches as well. NewReno makes a misguided
1711 * attempt to 'solve' this problem by waiting for an actual failure to occur,
1712 * then backing off, then steadily increasing the window again until another
1713 * failure occurs, ad infinitum. This results in terrible oscillation that
1714 * is only made worse as network loads increase and the idea of intentionally
1715 * blowing out network buffers is, frankly, a terrible way to manage network
1718 * It is far better to limit the transmit window prior to the failure
1719 * condition being achieved. There are two general ways to do this: First
1720 * you can 'scan' through different transmit window sizes and locate the
1721 * point where the RTT stops increasing, indicating that you have filled the
1722 * pipe, then scan backwards until you note that RTT stops decreasing, then
1723 * repeat ad infinitum. This method works in principle but has severe
1724 * implementation issues due to RTT variances, timer granularity, and
1725 * instability in the algorithm which can lead to many false positives and
1726 * create oscillations as well as interact badly with other TCP streams
1727 * implementing the same algorithm.
1729 * The second method is to limit the window to the bandwidth delay product
1730 * of the link. This is the method we implement. RTT variances and our
1731 * own manipulation of the congestion window, bwnd, can potentially
1732 * destabilize the algorithm. For this reason we have to stabilize the
1733 * elements used to calculate the window. We do this by using the minimum
1734 * observed RTT, the long term average of the observed bandwidth, and
1735 * by adding two segments worth of slop. It isn't perfect but it is able
1736 * to react to changing conditions and gives us a very stable basis on
1737 * which to extend the algorithm.
1740 tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
1746 INP_LOCK_ASSERT(tp->t_inpcb);
1749 * If inflight_enable is disabled in the middle of a tcp connection,
1750 * make sure snd_bwnd is effectively disabled.
1752 if (tcp_inflight_enable == 0 || tp->t_rttlow < tcp_inflight_rttthresh) {
1753 tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1754 tp->snd_bandwidth = 0;
1759 * Figure out the bandwidth. Due to the tick granularity this
1760 * is a very rough number and it MUST be averaged over a fairly
1761 * long period of time. XXX we need to take into account a link
1762 * that is not using all available bandwidth, but for now our
1763 * slop will ramp us up if this case occurs and the bandwidth later
1766 * Note: if the ticks counter rolls over, 'bw' may wind up negative. We must
1767 * effectively reset t_bw_rtttime for this case.
1770 if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)
1773 bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
1774 (save_ticks - tp->t_bw_rtttime);
1775 tp->t_bw_rtttime = save_ticks;
1776 tp->t_bw_rtseq = ack_seq;
1777 if (tp->t_bw_rtttime == 0 || (int)bw < 0)
1779 bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;
1781 tp->snd_bandwidth = bw;
1784 * Calculate the semi-static bandwidth delay product, plus two maximal
1785 * segments. The additional slop puts us squarely in the sweet
1786 * spot and also handles the bandwidth run-up case and stabilization.
1787 * Without the slop we could be locking ourselves into a lower
1790 * Situations Handled:
1791 * (1) Prevents over-queueing of packets on LANs, especially on
1792 * high speed LANs, allowing larger TCP buffers to be
1793 * specified, and also does a good job preventing
1794 * over-queueing of packets over choke points like modems
1795 * (at least for the transmit side).
1797 * (2) Is able to handle changing network loads (bandwidth
1798 * drops so bwnd drops, bandwidth increases so bwnd
1801 * (3) Theoretically should stabilize in the face of multiple
1802 * connections implementing the same algorithm (this may need
1805 * (4) Stability value (defaults to 20 = 2 maximal packets) can
1806 * be adjusted with a sysctl but typically only needs to be changed
1807 * on very slow connections. A value no smaller than 5
1808 * should be used, but only reduce this default if you have
1811 #define USERTT ((tp->t_srtt + tp->t_rttbest) / 2)
1812 bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) + tcp_inflight_stab * tp->t_maxseg / 10;
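/*
 * Worked example with illustrative numbers: bw = 1,000,000 bytes/sec, a
 * smoothed RTT of 50 ms (USERTT / (hz << TCP_RTT_SHIFT) = 0.05 sec) and
 * t_maxseg = 1460 with the default tcp_inflight_stab of 20 gives
 * bwnd = 1,000,000 * 0.05 + 2 * 1460 = 52,920 bytes.
 */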
1815 if (tcp_inflight_debug > 0) {
1817 if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
1819 printf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
1828 if ((long)bwnd < tcp_inflight_min)
1829 bwnd = tcp_inflight_min;
1830 if (bwnd > tcp_inflight_max)
1831 bwnd = tcp_inflight_max;
1832 if ((long)bwnd < tp->t_maxseg * 2)
1833 bwnd = tp->t_maxseg * 2;
1834 tp->snd_bwnd = bwnd;
1837 #ifdef TCP_SIGNATURE
1839 * Callback function invoked by m_apply() to digest TCP segment data
1840 * contained within an mbuf chain.
1843 tcp_signature_apply(void *fstate, void *data, u_int len)
1846 MD5Update(fstate, (u_char *)data, len);
1851 * Compute TCP-MD5 hash of a TCPv4 segment. (RFC2385)
1854 * m pointer to head of mbuf chain
1855 * off0 offset to TCP header within the mbuf chain
1856 * len length of TCP segment data, excluding options
1857 * optlen length of TCP segment options
1858 * buf pointer to storage for computed MD5 digest
1859 * direction direction of flow (IPSEC_DIR_INBOUND or OUTBOUND)
1861 * We do this over ip, tcphdr, segment data, and the key in the SADB.
1862 * When called from tcp_input(), we can be sure that th_sum has been
1863 * zeroed out and verified already.
1865 * This function is for IPv4 use only. Calling this function with an
1866 * IPv6 packet in the mbuf chain will yield undefined results.
1868 * Return 0 if successful, otherwise return -1.
1870 * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
1871 * search with the destination IP address, and a 'magic SPI' to be
1872 * determined by the application. This is hardcoded elsewhere to 1179
1873 * right now. Another branch of this code exists which uses the SPD to
1874 * specify per-application flows but it is unstable.
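/*
 * Illustrative setup, not taken from this file: the shared key is normally
 * installed into the SADB with setkey(8) as a tcp-md5 SA for each peer,
 * along the lines of
 *
 *	add 10.0.0.1 10.0.0.2 tcp 0x1000 -A tcp-md5 "sharedsecret" ;
 *
 * where the addresses and key are placeholders and the SPI must be the one
 * this code looks up via TCP_SIG_SPI.
 */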
1877 tcp_signature_compute(struct mbuf *m, int off0, int len, int optlen,
1878 u_char *buf, u_int direction)
1880 union sockaddr_union dst;
1881 struct ippseudo ippseudo;
1885 struct ipovly *ipovly;
1886 struct secasvar *sav;
1890 KASSERT(m != NULL, ("NULL mbuf chain"));
1891 KASSERT(buf != NULL, ("NULL signature pointer"));
1893 /* Extract the destination from the IP header in the mbuf. */
1894 ip = mtod(m, struct ip *);
1895 bzero(&dst, sizeof(union sockaddr_union));
1896 dst.sa.sa_len = sizeof(struct sockaddr_in);
1897 dst.sa.sa_family = AF_INET;
1898 dst.sin.sin_addr = (direction == IPSEC_DIR_INBOUND) ?
1899 ip->ip_src : ip->ip_dst;
1901 /* Look up an SADB entry which matches the address of the peer. */
1902 sav = KEY_ALLOCSA(&dst, IPPROTO_TCP, htonl(TCP_SIG_SPI));
1904 printf("%s: SADB lookup failed for %s\n", __func__,
1905 inet_ntoa(dst.sin.sin_addr));
1910 ipovly = (struct ipovly *)ip;
1911 th = (struct tcphdr *)((u_char *)ip + off0);
1912 doff = off0 + sizeof(struct tcphdr) + optlen;
1915 * Step 1: Update MD5 hash with IP pseudo-header.
1917 * XXX The ippseudo header MUST be digested in network byte order,
1918 * or else we'll fail the regression test. Assume all fields we've
1919 * been doing arithmetic on have been in host byte order.
1920 * XXX One cannot depend on ipovly->ih_len here. When called from
1921 * tcp_output(), the underlying ip_len member has not yet been set.
1923 ippseudo.ippseudo_src = ipovly->ih_src;
1924 ippseudo.ippseudo_dst = ipovly->ih_dst;
1925 ippseudo.ippseudo_pad = 0;
1926 ippseudo.ippseudo_p = IPPROTO_TCP;
1927 ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) + optlen);
1928 MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));
1931 * Step 2: Update MD5 hash with TCP header, excluding options.
1932 * The TCP checksum must be set to zero.
1934 savecsum = th->th_sum;
1936 MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
1937 th->th_sum = savecsum;
1940 * Step 3: Update MD5 hash with TCP segment data.
1941 * Use m_apply() to avoid an early m_pullup().
1944 m_apply(m, doff, len, tcp_signature_apply, &ctx);
1947 * Step 4: Update MD5 hash with shared secret.
1949 MD5Update(&ctx, _KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth));
1950 MD5Final(buf, &ctx);
1952 key_sa_recordxfer(sav, m);
1956 #endif /* TCP_SIGNATURE */
1959 sysctl_drop(SYSCTL_HANDLER_ARGS)
1961 /* addrs[0] is a foreign socket, addrs[1] is a local one. */
1962 struct sockaddr_storage addrs[2];
1966 struct sockaddr_in *fin, *lin;
1968 struct sockaddr_in6 *fin6, *lin6;
1969 struct in6_addr f6, l6;
1980 if (req->oldptr != NULL || req->oldlen != 0)
1982 if (req->newptr == NULL)
1984 if (req->newlen < sizeof(addrs))
1986 error = SYSCTL_IN(req, &addrs, sizeof(addrs));
1990 switch (addrs[0].ss_family) {
1993 fin6 = (struct sockaddr_in6 *)&addrs[0];
1994 lin6 = (struct sockaddr_in6 *)&addrs[1];
1995 if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
1996 lin6->sin6_len != sizeof(struct sockaddr_in6))
1998 if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
1999 if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
2001 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
2002 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
2003 fin = (struct sockaddr_in *)&addrs[0];
2004 lin = (struct sockaddr_in *)&addrs[1];
2007 error = sa6_embedscope(fin6, ip6_use_defzone);
2010 error = sa6_embedscope(lin6, ip6_use_defzone);
2016 fin = (struct sockaddr_in *)&addrs[0];
2017 lin = (struct sockaddr_in *)&addrs[1];
2018 if (fin->sin_len != sizeof(struct sockaddr_in) ||
2019 lin->sin_len != sizeof(struct sockaddr_in))
2025 INP_INFO_WLOCK(&tcbinfo);
2026 switch (addrs[0].ss_family) {
2029 inp = in6_pcblookup_hash(&tcbinfo, &f6, fin6->sin6_port,
2030 &l6, lin6->sin6_port, 0, NULL);
2034 inp = in_pcblookup_hash(&tcbinfo, fin->sin_addr, fin->sin_port,
2035 lin->sin_addr, lin->sin_port, 0, NULL);
2040 if (inp->inp_vflag & INP_TIMEWAIT) {
2042 * XXXRW: There currently exists a state where an
2043 * inpcb is present, but its timewait state has been
2044 * discarded. For now, don't allow dropping of this
2050 } else if (!(inp->inp_vflag & INP_DROPPED) &&
2051 !(inp->inp_socket->so_options & SO_ACCEPTCONN)) {
2052 tp = intotcpcb(inp);
2053 tcp_drop(tp, ECONNABORTED);
2058 INP_INFO_WUNLOCK(&tcbinfo);
2062 SYSCTL_PROC(_net_inet_tcp, TCPCTL_DROP, drop,
2063 CTLTYPE_STRUCT|CTLFLAG_WR|CTLFLAG_SKIP, NULL,
2064 0, sysctl_drop, "", "Drop TCP connection");
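/*
 * This is the sysctl used by the tcpdrop(8) utility: userland writes the
 * pair of sockaddrs (foreign first, then local, as noted in sysctl_drop())
 * to net.inet.tcp.drop to abort a single connection.
 */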
2067 * Generate a standardized TCP log line for use throughout the
2068 * tcp subsystem. Memory allocation is done with M_NOWAIT to
2069 * allow use in the interrupt context.
2071 * NB: The caller MUST free(s, M_TCPLOG) the returned string.
2072 * NB: The function may return NULL if memory allocation failed.
2074 * Due to header inclusion and ordering limitations the struct ip
2075 * and ip6_hdr pointers have to be passed as void pointers.
2078 tcp_log_addrs(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
2085 const struct ip6_hdr *ip6;
2087 ip6 = (const struct ip6_hdr *)ip6hdr;
2089 ip = (struct ip *)ip4hdr;
2092 * The log line looks like this:
2093 * "TCP: [1.2.3.4]:50332 to [1.2.3.4]:80 tcpflags 0x2<SYN>"
2095 size = sizeof("TCP: []:12345 to []:12345 tcpflags 0x2<>") +
2096 sizeof(PRINT_TH_FLAGS) + 1 +
2098 2 * INET6_ADDRSTRLEN;
2100 2 * INET_ADDRSTRLEN;
2103 /* Is logging enabled? */
2104 if (tcp_log_debug == 0 && tcp_log_in_vain == 0)
2107 s = malloc(size, M_TCPLOG, M_ZERO|M_NOWAIT);
2111 strcat(s, "TCP: [");
2114 if (inc && inc->inc_isipv6 == 0) {
2115 inet_ntoa_r(inc->inc_faddr, sp);
2117 sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
2119 inet_ntoa_r(inc->inc_laddr, sp);
2121 sprintf(sp, "]:%i", ntohs(inc->inc_lport));
2124 ip6_sprintf(sp, &inc->inc6_faddr);
2126 sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
2128 ip6_sprintf(sp, &inc->inc6_laddr);
2130 sprintf(sp, "]:%i", ntohs(inc->inc_lport));
2131 } else if (ip6 && th) {
2132 ip6_sprintf(sp, &ip6->ip6_src);
2134 sprintf(sp, "]:%i to [", ntohs(th->th_sport));
2136 ip6_sprintf(sp, &ip6->ip6_dst);
2138 sprintf(sp, "]:%i", ntohs(th->th_dport));
2140 } else if (ip && th) {
2141 inet_ntoa_r(ip->ip_src, sp);
2143 sprintf(sp, "]:%i to [", ntohs(th->th_sport));
2145 inet_ntoa_r(ip->ip_dst, sp);
2147 sprintf(sp, "]:%i", ntohs(th->th_dport));
2154 sprintf(sp, " tcpflags 0x%b", th->th_flags, PRINT_TH_FLAGS);
2155 if (*(s + size - 1) != '\0')
2156 panic("%s: string too long", __func__);