/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include "opt_inet6.h"
#include "opt_ipsec.h"

#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>

#include <sys/domain.h>

#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/vimage.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <netinet/ip6.h>

#include <netinet/in_pcb.h>

#include <netinet6/in6_pcb.h>

#include <netinet/in_var.h>
#include <netinet/ip_var.h>

#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet6/nd6.h>

#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_offload.h>

#include <netinet6/tcp6_var.h>

#include <netinet/tcpip.h>

#include <netinet/tcp_debug.h>

#include <netinet6/ip6protosw.h>

#include <netipsec/ipsec.h>
#include <netipsec/xform.h>

#include <netipsec/ipsec6.h>

#include <netipsec/key.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>
int	tcp_mssdflt = TCP_MSS;

int	tcp_v6mssdflt = TCP6_MSS;

sysctl_net_inet_tcp_mss_check(SYSCTL_HANDLER_ARGS)

	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if (new < TCP_MINMSS)

SYSCTL_PROC(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_mssdflt, 0, &sysctl_net_inet_tcp_mss_check, "I",
    "Default TCP Maximum Segment Size");

sysctl_net_inet_tcp_mss_v6_check(SYSCTL_HANDLER_ARGS)

	new = V_tcp_v6mssdflt;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if (new < TCP_MINMSS)

			V_tcp_v6mssdflt = new;

SYSCTL_PROC(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_v6mssdflt, 0, &sysctl_net_inet_tcp_mss_v6_check, "I",
    "Default TCP Maximum Segment Size for IPv6");
/*
 * Minimum MSS we accept and use.  This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one.  The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending.  Set to zero to disable MINMSS
 * checking.  This setting prevents us from sending too small packets.
 */
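/*
 * Illustrative arithmetic: moving 1 MB with a normal Ethernet MSS of
 * 1460 bytes takes roughly 700 segments, while a forced MSS of 20 bytes
 * turns the same transfer into more than 52,000 segments, each paying
 * the full per-packet header and interrupt cost.
 */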
int	tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");
int	tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

static int	tcp_log_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_debug, CTLFLAG_RW,
    &tcp_log_debug, 0, "Log errors caused by incoming TCP segments");

static int	tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

static int	do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW,
    &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
    &tcbinfo.ipi_count, 0, "Number of active PCBs");

static int	icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW,
    &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int	tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

/*
 * TCP bandwidth limiting sysctls.  Note that the default lower bound of
 * 1024 exists only for debugging.  A good production default would be
 * something like 6100.
 */
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, inflight, CTLFLAG_RW, 0,
    "TCP inflight data limiting");

static int	tcp_inflight_enable = 1;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int	tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int	tcp_inflight_rttthresh;
SYSCTL_PROC(_net_inet_tcp_inflight, OID_AUTO, rttthresh, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_inflight_rttthresh, 0, sysctl_msec_to_ticks, "I",
    "RTT threshold below which inflight will deactivate itself");

static int	tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower-bound for TCP inflight window");

static int	tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper-bound for TCP inflight window");

static int	tcp_inflight_stab = 20;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Inflight Algorithm Stabilization 20 = 2 packets");

uma_zone_t sack_hole_zone;

static struct inpcb *tcp_notify(struct inpcb *, int);
static void	tcp_isn_tick(void *);

/*
 * Target size of TCP PCB hash tables. Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#define TCBHASHSIZE	512
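/*
 * For example (hypothetical value), the hash size may be raised at boot
 * via /boot/loader.conf:
 *
 *	net.inet.tcp.tcbhashsize="2048"
 *
 * Values that are not a power of two are rejected by tcp_init() below,
 * which falls back to 512 and prints a warning.
 */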
/*
 * Callouts should be moved into struct tcp directly.  They are currently
 * separate because the tcpcb structure is exported to userland for sysctl
 * parsing, and userland does not know about callouts.
 */

static uma_zone_t tcpcb_zone;
MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers");
struct callout isn_callout;
static struct mtx isn_mtx;

#define	ISN_LOCK_INIT()	mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF)
#define	ISN_LOCK()	mtx_lock(&isn_mtx)
#define	ISN_UNLOCK()	mtx_unlock(&isn_mtx)
/*
 * TCP initialization.
 */

tcp_zone_change(void *tag)

	uma_zone_set_max(V_tcbinfo.ipi_zone, maxsockets);
	uma_zone_set_max(tcpcb_zone, maxsockets);
	tcp_tw_zone_change();

tcp_inpcb_init(void *mem, int size, int flags)

	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "tcpinp");

	int hashsize = TCBHASHSIZE;
	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;

	tcp_rexmit_min = TCPTV_MIN;
	if (tcp_rexmit_min < 1)

	tcp_rexmit_slop = TCPTV_CPU_VAR;
	V_tcp_inflight_rttthresh = TCPTV_INFLIGHT_RTTTHRESH;
	tcp_finwait2_timeout = TCPTV_FINWAIT2_TIMEOUT;

	INP_INFO_LOCK_INIT(&V_tcbinfo, "tcp");

	V_tcbinfo.ipi_listhead = &V_tcb;
	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		printf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */

	tcp_tcbhashsize = hashsize;
	V_tcbinfo.ipi_hashbase = hashinit(hashsize, M_PCB,
	    &V_tcbinfo.ipi_hashmask);
	V_tcbinfo.ipi_porthashbase = hashinit(hashsize, M_PCB,
	    &V_tcbinfo.ipi_porthashmask);
	V_tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
	    NULL, NULL, tcp_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(V_tcbinfo.ipi_zone, maxsockets);

#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))

#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))

	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)

#undef TCP_MINPROTOHDR

	/*
	 * These have to be type stable for the benefit of the timers.
	 */
	tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcpcb_zone, maxsockets);

	callout_init(&isn_callout, CALLOUT_MPSAFE);

	EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
	    SHUTDOWN_PRI_DEFAULT);
	sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	EVENTHANDLER_REGISTER(maxsockets_change, tcp_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);

	callout_stop(&isn_callout);

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */

tcpip_fillheaders(struct inpcb *inp, void *ip_ptr, void *tcp_ptr)

	struct tcphdr *th = (struct tcphdr *)tcp_ptr;

	INP_WLOCK_ASSERT(inp);

	if ((inp->inp_vflag & INP_IPV6) != 0) {

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
		    (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
		    (IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons(sizeof(struct tcphdr));
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;

		ip = (struct ip *)ip_ptr;
		ip->ip_v = IPVERSION;

		ip->ip_tos = inp->inp_ip_tos;

		ip->ip_ttl = inp->inp_ip_ttl;

		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;

	th->th_sport = inp->inp_lport;
	th->th_dport = inp->inp_fport;

	th->th_sum = 0;		/* in_pseudo() is called later for ipv4 */

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */

tcpip_maketemplate(struct inpcb *inp)

	t = malloc(sizeof(*t), M_TEMP, M_NOWAIT);

	tcpip_fillheaders(inp, (void *)&t->tt_ipgen, (void *)&t->tt_t);
/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
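/*
 * A typical keepalive use (a sketch that roughly mirrors the probe sent
 * from the keepalive timer, assuming "t" was obtained from
 * tcpip_maketemplate() above):
 *
 *	tcp_respond(tp, t->tt_ipgen, &t->tt_t, NULL,
 *	    tp->rcv_nxt, tp->snd_una - 1, 0);
 *
 * With m == NULL a fresh mbuf is allocated and the header template is
 * copied into it; a seq value one below snd_una elicits an ACK from the
 * peer without carrying new data.
 */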
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
    tcp_seq ack, tcp_seq seq, int flags)

	KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));

	isipv6 = ((struct ip *)ipgen)->ip_v == 6;

		KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
		INP_WLOCK_ASSERT(inp);

	if (!(flags & TH_RST)) {
		win = sbspace(&inp->inp_socket->so_rcv);
		if (win > (long)TCP_MAXWIN << tp->rcv_scale)
			win = (long)TCP_MAXWIN << tp->rcv_scale;

		m = m_gethdr(M_DONTWAIT, MT_DATA);

		m->m_data += max_linkhdr;

			bcopy((caddr_t)ip6, mtod(m, caddr_t),
			    sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);

			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);

		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));

		/*
		 * XXX MRT We inherit the FIB, which is lucky.
		 */

		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */

#define xchg(a,b,type) { type t; t=a; a=b; b=t; }

		xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
		nth = (struct tcphdr *)(ip6 + 1);

		xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
		nth = (struct tcphdr *)(ip + 1);

		/*
		 * this is usually a case when an extension header
		 * exists between the IPv6 header and the
		 * TCP header.
		 */
		nth->th_sport = th->th_sport;
		nth->th_dport = th->th_dport;

	xchg(nth->th_dport, nth->th_sport, n_short);

		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +

		tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);

		tlen += sizeof (struct tcpiphdr);

		ip->ip_ttl = V_ip_defttl;
		if (V_path_mtu_discovery)

	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;

		/*
		 * Packet is associated with a socket, so allow the
		 * label of the response to reflect the socket label.
		 */
		INP_WLOCK_ASSERT(inp);
		mac_inpcb_create_mbuf(inp, m);

		/*
		 * Packet is not associated with a socket, so possibly
		 * update the label in place.
		 */
		mac_netinet_tcp_reply(m);

	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);

	nth->th_off = sizeof (struct tcphdr) >> 2;
	nth->th_flags = flags;

		nth->th_win = htons((u_short) (win >> tp->rcv_scale));

		nth->th_win = htons((u_short)win);

		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr),
		    tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb :

		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);

	if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);

		(void) ip6_output(m, NULL, NULL, ipflags, NULL, NULL, inp);

	(void) ip_output(m, NULL, NULL, ipflags, NULL, inp);

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */

tcp_newtcpcb(struct inpcb *inp)

	struct tcpcb_mem *tm;

	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;

	tm = uma_zalloc(tcpcb_zone, M_NOWAIT | M_ZERO);

	tp->t_timers = &tm->tt;
	/* LIST_INIT(&tp->t_segq); */ /* XXX covered by M_ZERO */
	tp->t_maxseg = tp->t_maxopd =

		isipv6 ? V_tcp_v6mssdflt :

	/* Set up our timeouts. */
	callout_init(&tp->t_timers->tt_rexmt, CALLOUT_MPSAFE);
	callout_init(&tp->t_timers->tt_persist, CALLOUT_MPSAFE);
	callout_init(&tp->t_timers->tt_keep, CALLOUT_MPSAFE);
	callout_init(&tp->t_timers->tt_2msl, CALLOUT_MPSAFE);
	callout_init(&tp->t_timers->tt_delack, CALLOUT_MPSAFE);

	if (V_tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);

		tp->t_flags |= TF_SACK_PERMIT;
	TAILQ_INIT(&tp->snd_holes);
	tp->t_inpcb = inp;	/* XXX */

	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
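	/*
	 * Unit note: t_srtt and t_rttvar are kept left-shifted by
	 * TCP_RTT_SHIFT and TCP_RTTVAR_SHIFT, so the initialization above
	 * makes the conventional srtt + 4 * rttvar estimate come out to
	 * exactly TCPTV_RTOBASE, which is also assigned to t_rxtcur below.
	 */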
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = ticks;
	tp->t_bw_rtttime = ticks;

	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = V_ip_defttl;

	return (tp);		/* XXX */
/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */

tcp_drop(struct tcpcb *tp, int errno)

	struct socket *so = tp->t_inpcb->inp_socket;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(tp->t_inpcb);

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output_reset(tp);
		V_tcpstat.tcps_drops++;

		V_tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
tcp_discardcb(struct tcpcb *tp)

	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;

	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;

	INP_WLOCK_ASSERT(inp);

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.
	 */
	callout_stop(&tp->t_timers->tt_rexmt);
	callout_stop(&tp->t_timers->tt_persist);
	callout_stop(&tp->t_timers->tt_keep);
	callout_stop(&tp->t_timers->tt_2msl);
	callout_stop(&tp->t_timers->tt_delack);

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 4 rtt samples.
	 * 4 samples is enough for the srtt filter to converge
	 * to within a reasonable percentage of the correct value;
	 * fewer samples and we could save a bogus rtt.  The danger
	 * is not high as tcp quickly recovers from everything.
	 * XXX: Works very well but needs some more statistics!
	 */
	if (tp->t_rttupdated >= 4) {
		struct hc_metrics_lite metrics;

		bzero(&metrics, sizeof(metrics));

		/*
		 * Update the ssthresh always when the conditions below
		 * are satisfied. This gives us better new start value
		 * for the congestion avoidance for new connections.
		 * ssthresh is only set if packet loss occurred on a session.
		 *
		 * XXXRW: 'so' may be NULL here, and/or socket buffer may be
		 * being torn down.  Ideally this code would not use 'so'.
		 */
		ssthresh = tp->snd_ssthresh;
		if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;

			ssthresh *= (u_long)(tp->t_maxseg +

			    (isipv6 ? sizeof (struct ip6_hdr) +
			    sizeof (struct tcphdr) :

			    sizeof (struct tcpiphdr)

		metrics.rmx_ssthresh = ssthresh;

		metrics.rmx_rtt = tp->t_srtt;
		metrics.rmx_rttvar = tp->t_rttvar;
		/* XXX: This wraps if the pipe is more than 4 Gbit per second */
		metrics.rmx_bandwidth = tp->snd_bandwidth;
		metrics.rmx_cwnd = tp->snd_cwnd;
		metrics.rmx_sendpipe = 0;
		metrics.rmx_recvpipe = 0;

		tcp_hc_update(&inp->inp_inc, &metrics);

	/* free the reassembly queue, if any */
	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
		LIST_REMOVE(q, tqe_q);

		uma_zfree(tcp_reass_zone, q);

	/* Disconnect offload device, if any. */
	tcp_offload_detach(tp);

	tcp_free_sackholes(tp);
	inp->inp_ppcb = NULL;

	uma_zfree(tcpcb_zone, tp);
/*
 * Attempt to close a TCP control block, marking it as dropped, and freeing
 * the socket if we hold the only reference.
 */

tcp_close(struct tcpcb *tp)

	struct inpcb *inp = tp->t_inpcb;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	/* Notify any offload devices of listener close */
	if (tp->t_state == TCPS_LISTEN)
		tcp_offload_listen_close(tp);

	V_tcpstat.tcps_closed++;
	KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL"));
	so = inp->inp_socket;
	soisdisconnected(so);
	if (inp->inp_vflag & INP_SOCKREF) {
		KASSERT(so->so_state & SS_PROTOREF,
		    ("tcp_close: !SS_PROTOREF"));
		inp->inp_vflag &= ~INP_SOCKREF;

		so->so_state &= ~SS_PROTOREF;

	struct tseg_qent *te;

	/*
	 * Walk the tcpbs, if existing, and flush the reassembly queue,
	 *
	 * XXX: The "Net/3" implementation doesn't imply that the TCP
	 * reassembly queue should be flushed, but in a situation
	 * where we're really low on mbufs, this is potentially
	 * useful.
	 */
	INP_INFO_RLOCK(&V_tcbinfo);
	LIST_FOREACH(inpb, V_tcbinfo.ipi_listhead, inp_list) {
		if (inpb->inp_vflag & INP_TIMEWAIT)

		if ((tcpb = intotcpcb(inpb)) != NULL) {
			while ((te = LIST_FIRST(&tcpb->t_segq))

				LIST_REMOVE(te, tqe_q);

				uma_zfree(tcp_reass_zone, te);

			tcp_clean_sackreport(tcpb);

	INP_INFO_RUNLOCK(&V_tcbinfo);
/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static struct inpcb *
tcp_notify(struct inpcb *inp, int error)

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	if ((inp->inp_vflag & INP_TIMEWAIT) ||
	    (inp->inp_vflag & INP_DROPPED))

	KASSERT(tp != NULL, ("tcp_notify: tp == NULL"));

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	    error == EHOSTDOWN)) {

	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&

		tp = tcp_drop(tp, error);

		tp->t_softerror = error;

	wakeup(&so->so_timeo);
tcp_pcblist(SYSCTL_HANDLER_ARGS)

	int error, i, m, n, pcb_count;
	struct inpcb *inp, **inp_list;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		m = syncache_pcbcount();
		n = V_tcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + ((m + n) + n/8) * sizeof(struct xtcpcb);

	if (req->newptr != NULL)

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_RLOCK(&V_tcbinfo);
	gencnt = V_tcbinfo.ipi_gencnt;
	n = V_tcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&V_tcbinfo);

	m = syncache_pcbcount();

	error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
	    + (n + m) * sizeof(struct xtcpcb));

	xig.xig_len = sizeof xig;
	xig.xig_count = n + m;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);

	error = syncache_pcblist(req, m, &pcb_count);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == NULL)

	INP_INFO_RLOCK(&V_tcbinfo);
	for (inp = LIST_FIRST(V_tcbinfo.ipi_listhead), i = 0;
	    inp != NULL && i < n; inp = LIST_NEXT(inp, inp_list)) {

		if (inp->inp_gencnt <= gencnt) {

			/*
			 * XXX: This use of cr_cansee(), introduced with
			 * TCP state changes, is not quite right, but for
			 * now, better than nothing.
			 */
			if (inp->inp_vflag & INP_TIMEWAIT) {
				if (intotw(inp) != NULL)
					error = cr_cansee(req->td->td_ucred,
					    intotw(inp)->tw_cred);

					error = EINVAL;	/* Skip this inp. */

				error = cr_canseesocket(req->td->td_ucred,

	INP_INFO_RUNLOCK(&V_tcbinfo);

	for (i = 0; i < n; i++) {

		if (inp->inp_gencnt <= gencnt) {

			bzero(&xt, sizeof(xt));
			xt.xt_len = sizeof xt;
			/* XXX should avoid extra copy */
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb == NULL)
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
			else if (inp->inp_vflag & INP_TIMEWAIT) {
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
				xt.xt_tp.t_state = TCPS_TIME_WAIT;

				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket != NULL)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);

				bzero(&xt.xt_socket, sizeof xt.xt_socket);
				xt.xt_socket.xso_protocol = IPPROTO_TCP;

			xt.xt_inp.inp_gencnt = inp->inp_gencnt;

			error = SYSCTL_OUT(req, &xt, sizeof xt);

	/*
	 * Give the user an updated idea of our state.
	 * If the generation differs from what we told
	 * her before, she knows that something happened
	 * while we were processing this request, and it
	 * might be necessary to retry.
	 */
	INP_INFO_RLOCK(&V_tcbinfo);
	xig.xig_gen = V_tcbinfo.ipi_gencnt;
	xig.xig_sogen = so_gencnt;
	xig.xig_count = V_tcbinfo.ipi_count + pcb_count;
	INP_INFO_RUNLOCK(&V_tcbinfo);
	error = SYSCTL_OUT(req, &xig, sizeof xig);

	free(inp_list, M_TEMP);

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
tcp_getcred(SYSCTL_HANDLER_ARGS)

	struct sockaddr_in addrs[2];

	error = priv_check(req->td, PRIV_NETINET_GETCRED);

	error = SYSCTL_IN(req, addrs, sizeof(addrs));

	INP_INFO_RLOCK(&V_tcbinfo);
	inp = in_pcblookup_hash(&V_tcbinfo, addrs[1].sin_addr,
	    addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);

		INP_INFO_RUNLOCK(&V_tcbinfo);
		if (inp->inp_socket == NULL)

		error = cr_canseesocket(req->td->td_ucred,

		cru2x(inp->inp_socket->so_cred, &xuc);

		INP_INFO_RUNLOCK(&V_tcbinfo);

	error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp_getcred, "S,xucred", "Get the xucred of a TCP connection");

tcp6_getcred(SYSCTL_HANDLER_ARGS)

	struct sockaddr_in6 addrs[2];

	int error, mapped = 0;

	error = priv_check(req->td, PRIV_NETINET_GETCRED);

	error = SYSCTL_IN(req, addrs, sizeof(addrs));

	if ((error = sa6_embedscope(&addrs[0], V_ip6_use_defzone)) != 0 ||
	    (error = sa6_embedscope(&addrs[1], V_ip6_use_defzone)) != 0) {

	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))

	INP_INFO_RLOCK(&V_tcbinfo);

		inp = in_pcblookup_hash(&V_tcbinfo,
		    *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],

		    *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],

		inp = in6_pcblookup_hash(&V_tcbinfo,
		    &addrs[1].sin6_addr, addrs[1].sin6_port,
		    &addrs[0].sin6_addr, addrs[0].sin6_port, 0, NULL);

		INP_INFO_RUNLOCK(&V_tcbinfo);
		if (inp->inp_socket == NULL)

		error = cr_canseesocket(req->td->td_ucred,

		cru2x(inp->inp_socket->so_cred, &xuc);

		INP_INFO_RUNLOCK(&V_tcbinfo);

	error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection");
tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)

	struct ip *ip = vip;

	struct in_addr faddr;

	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;

	struct in_conninfo inc;
	tcp_seq icmp_tcp_seq;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)

	if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
	    cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
		notify = tcp_drop_syn_sent;
	/*
	 * Redirects don't need to be handled up here.
	 */
	else if (PRC_IS_REDIRECT(cmd))
	/*
	 * Source quench is deprecated.
	 */
	else if (cmd == PRC_QUENCH)
	/*
	 * Hostdead is ugly because it goes linearly through all PCBs.
	 * XXX: We never get this from ICMP, otherwise it makes an
	 * excellent DoS attack on machines with many connections.
	 */
	else if (cmd == PRC_HOSTDEAD)

	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)

		icp = (struct icmp *)((caddr_t)ip
		    - offsetof(struct icmp, icmp_ip));
		th = (struct tcphdr *)((caddr_t)ip
		    + (ip->ip_hl << 2));
		INP_INFO_WLOCK(&V_tcbinfo);
		inp = in_pcblookup_hash(&V_tcbinfo, faddr, th->th_dport,
		    ip->ip_src, th->th_sport, 0, NULL);

			if (!(inp->inp_vflag & INP_TIMEWAIT) &&
			    !(inp->inp_vflag & INP_DROPPED) &&
			    !(inp->inp_socket == NULL)) {
				icmp_tcp_seq = htonl(th->th_seq);
				tp = intotcpcb(inp);
				if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
				    SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
					if (cmd == PRC_MSGSIZE) {

						/*
						 * If we got a needfrag set the MTU
						 * in the route to the suggested new
						 * value (if given) and then notify.
						 */
						bzero(&inc, sizeof(inc));
						inc.inc_flags = 0;	/* IPv4 */
						inc.inc_faddr = faddr;

						    inp->inp_inc.inc_fibnum;

						mtu = ntohs(icp->icmp_nextmtu);
						/*
						 * If no alternative MTU was
						 * proposed, try the next smaller
						 * one.  ip->ip_len has already
						 * been swapped in icmp_input().
						 */

							mtu = ip_next_mtu(ip->ip_len,

						if (mtu < max(296, V_tcp_minmss
						    + sizeof(struct tcpiphdr)))

						    + sizeof(struct tcpiphdr);

						/*
						 * Only cache the MTU if it
						 * is smaller than the interface
						 * or route MTU.  tcp_mtudisc()
						 * will do the right thing by itself.
						 */
						if (mtu <= tcp_maxmtu(&inc, NULL))
							tcp_hc_updatemtu(&inc, mtu);

					inp = (*notify)(inp, inetctlerrmap[cmd]);

			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;

			syncache_unreach(&inc, th);

		INP_INFO_WUNLOCK(&V_tcbinfo);

		in_pcbnotifyall(&V_tcbinfo, faddr, inetctlerrmap[cmd], notify);
tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)

	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;

	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;

	struct tcp_portonly {

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))

	if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (!PRC_IS_REDIRECT(cmd) &&
	    ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0))

	/* Source quench is deprecated. */
	else if (cmd == PRC_QUENCH)

	/* if the parameter is from icmp6, decode it. */

		ip6cp = (struct ip6ctlparam *)d;

		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;

		off = 0;	/* fool gcc */

		struct in_conninfo inc;

		/*
		 * XXX: We assume that when IPV6 is non-NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(*thp))

		bzero(&th, sizeof(th));
		m_copydata(m, off, sizeof(*thp), (caddr_t)&th);

		in6_pcbnotify(&V_tcbinfo, sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, NULL, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;

		INP_INFO_WLOCK(&V_tcbinfo);
		syncache_unreach(&inc, &th);
		INP_INFO_WUNLOCK(&V_tcbinfo);

		in6_pcbnotify(&V_tcbinfo, sa, 0, (const struct sockaddr *)sa6_src,
		    0, cmd, NULL, notify);
/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used, with only small modifications.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover occurs.
 *
 * As reading the *exact* system time is too expensive to be done
 * whenever setting up a TCP connection, we increment the time
 * offset in two ways.  First, a small random positive increment
 * is added to isn_offset for each connection that is set up.
 * Second, the function tcp_isn_tick fires once per clock tick
 * and increments isn_offset as necessary so that sequence numbers
 * are incremented at approximately ISN_BYTES_PER_SECOND.  The
 * random positive increments serve only to ensure that the same
 * exact sequence number is never sent out twice (as could otherwise
 * happen when a port is recycled in less than the system tick
 * interval.)
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 *
 * Locking of the global variables isn_secret, isn_last_reseed, isn_offset,
 * isn_offset_old, and isn_ctx is performed using the TCP pcbinfo lock.  In
 * general, this means holding an exclusive (write) lock.
 */
#define ISN_BYTES_PER_SECOND 1048576
#define ISN_STATIC_INCREMENT 4096
#define ISN_RANDOM_INCREMENT (4096 - 1)

static u_char isn_secret[32];
static int isn_last_reseed;
static u_int32_t isn_offset, isn_offset_old;
static MD5_CTX isn_ctx;

tcp_new_isn(struct tcpcb *tp)

	u_int32_t md5_buffer[4];

	INP_WLOCK_ASSERT(tp->t_inpcb);

	/* Seed if this is the first use, reseed if requested. */
	if ((V_isn_last_reseed == 0) || ((V_tcp_isn_reseed_interval > 0) &&
	    (((u_int)V_isn_last_reseed + (u_int)V_tcp_isn_reseed_interval*hz)

		read_random(&V_isn_secret, sizeof(V_isn_secret));
		V_isn_last_reseed = ticks;

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&V_isn_ctx);
	MD5Update(&V_isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&V_isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));

	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
		MD5Update(&V_isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
		    sizeof(struct in6_addr));
		MD5Update(&V_isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
		    sizeof(struct in6_addr));

		MD5Update(&V_isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
		    sizeof(struct in_addr));
		MD5Update(&V_isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
		    sizeof(struct in_addr));

	MD5Update(&V_isn_ctx, (u_char *) &V_isn_secret, sizeof(V_isn_secret));
	MD5Final((u_char *) &md5_buffer, &V_isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	V_isn_offset += ISN_STATIC_INCREMENT +
	    (arc4random() & ISN_RANDOM_INCREMENT);
	new_isn += V_isn_offset;

/*
 * Increment the offset to the next ISN_BYTES_PER_SECOND / 100 boundary
 * to keep time flowing at a relatively constant rate.  If the random
 * increments have already pushed us past the projected offset, do nothing.
 */

tcp_isn_tick(void *xtp)

	u_int32_t projected_offset;

	projected_offset = V_isn_offset_old + ISN_BYTES_PER_SECOND / 100;

	if (SEQ_GT(projected_offset, V_isn_offset))
		V_isn_offset = projected_offset;

	V_isn_offset_old = V_isn_offset;
	callout_reset(&isn_callout, hz/100, tcp_isn_tick, NULL);
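/*
 * Rate check on the above: with the callout rearmed every hz/100 ticks,
 * the offset is pushed forward by 1048576 / 100 = 10485 sequence numbers
 * per ~10 ms, i.e. roughly one megabyte of sequence space per second,
 * matching the ISN_BYTES_PER_SECOND target.
 */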
/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */

tcp_drop_syn_sent(struct inpcb *inp, int errno)

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	if ((inp->inp_vflag & INP_TIMEWAIT) ||
	    (inp->inp_vflag & INP_DROPPED))

	tp = intotcpcb(inp);
	if (tp->t_state != TCPS_SYN_SENT)

	tp = tcp_drop(tp, errno);

/*
 * When `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */

tcp_mtudisc(struct inpcb *inp, int errno)

	INP_WLOCK_ASSERT(inp);
	if ((inp->inp_vflag & INP_TIMEWAIT) ||
	    (inp->inp_vflag & INP_DROPPED))

	tp = intotcpcb(inp);
	KASSERT(tp != NULL, ("tcp_mtudisc: tp == NULL"));

	tcp_mss_update(tp, -1, NULL);

	so = inp->inp_socket;
	SOCKBUF_LOCK(&so->so_snd);
	/* If the mss is larger than the socket buffer, decrease the mss. */
	if (so->so_snd.sb_hiwat < tp->t_maxseg)
		tp->t_maxseg = so->so_snd.sb_hiwat;
	SOCKBUF_UNLOCK(&so->so_snd);

	V_tcpstat.tcps_mturesent++;

	tp->snd_nxt = tp->snd_una;
	tcp_free_sackholes(tp);
	tp->snd_recover = tp->snd_max;
	if (tp->t_flags & TF_SACK_PERMIT)
		EXIT_FASTRECOVERY(tp);
	tcp_output_send(tp);
/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */

tcp_maxmtu(struct in_conninfo *inc, int *flags)

	struct sockaddr_in *dst;

	KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));

	bzero(&sro, sizeof(sro));
	if (inc->inc_faddr.s_addr != INADDR_ANY) {
		dst = (struct sockaddr_in *)&sro.ro_dst;
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = inc->inc_faddr;
		in_rtalloc_ign(&sro, RTF_CLONING, inc->inc_fibnum);

	if (sro.ro_rt != NULL) {
		ifp = sro.ro_rt->rt_ifp;
		if (sro.ro_rt->rt_rmx.rmx_mtu == 0)
			maxmtu = ifp->if_mtu;

			maxmtu = min(sro.ro_rt->rt_rmx.rmx_mtu, ifp->if_mtu);

		/* Report additional interface capabilities. */
		if (flags != NULL) {
			if (ifp->if_capenable & IFCAP_TSO4 &&
			    ifp->if_hwassist & CSUM_TSO)

tcp_maxmtu6(struct in_conninfo *inc, int *flags)

	struct route_in6 sro6;

	KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));

	bzero(&sro6, sizeof(sro6));
	if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
		sro6.ro_dst.sin6_family = AF_INET6;
		sro6.ro_dst.sin6_len = sizeof(struct sockaddr_in6);
		sro6.ro_dst.sin6_addr = inc->inc6_faddr;
		rtalloc_ign((struct route *)&sro6, RTF_CLONING);

	if (sro6.ro_rt != NULL) {
		ifp = sro6.ro_rt->rt_ifp;
		if (sro6.ro_rt->rt_rmx.rmx_mtu == 0)
			maxmtu = IN6_LINKMTU(sro6.ro_rt->rt_ifp);

			maxmtu = min(sro6.ro_rt->rt_rmx.rmx_mtu,
			    IN6_LINKMTU(sro6.ro_rt->rt_ifp));

		/* Report additional interface capabilities. */
		if (flags != NULL) {
			if (ifp->if_capenable & IFCAP_TSO6 &&
			    ifp->if_hwassist & CSUM_TSO)

/* compute ESP/AH header size for TCP, including outer IP header. */

ipsec_hdrsiz_tcp(struct tcpcb *tp)

	struct ip6_hdr *ip6;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))

	MGETHDR(m, M_DONTWAIT, MT_DATA);

	if ((inp->inp_vflag & INP_IPV6) != 0) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcpip_fillheaders(inp, ip6, th);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);

		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcpip_fillheaders(inp, ip, th);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be) TCP/Vegas.  The code operates on the
 * transmitter side of a TCP connection and so only affects the transmit
 * side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad-infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this:  First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad-infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
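/*
 * Worked example with illustrative numbers: at a long-term average
 * bandwidth of 1,250,000 bytes/sec (a 10 Mbit/s path), a USERTT of
 * 40 ms, and a 1460-byte MSS with the default stab of 20, the formula
 * below yields
 *
 *	bwnd = 1250000 * 0.040 + (20 * 1460) / 10
 *	     = 50000 + 2920 = 52920 bytes
 *
 * i.e. the bandwidth-delay product plus two segments of slop.
 */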
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)

	INP_WLOCK_ASSERT(tp->t_inpcb);

	/*
	 * If inflight_enable is disabled in the middle of a tcp connection,
	 * make sure snd_bwnd is effectively disabled.
	 */
	if (V_tcp_inflight_enable == 0 ||
	    tp->t_rttlow < V_tcp_inflight_rttthresh) {
		tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
		tp->snd_bandwidth = 0;

	/*
	 * Figure out the bandwidth.  Due to the tick granularity this
	 * is a very rough number and it MUST be averaged over a fairly
	 * long period of time.  XXX we need to take into account a link
	 * that is not using all available bandwidth, but for now our
	 * slop will ramp us up if this case occurs and the bandwidth later
	 * increases.
	 *
	 * Note: if ticks rollover 'bw' may wind up negative.  We must
	 * effectively reset t_bw_rtttime for this case.
	 */

	if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)

	bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
	    (save_ticks - tp->t_bw_rtttime);
	tp->t_bw_rtttime = save_ticks;
	tp->t_bw_rtseq = ack_seq;
	if (tp->t_bw_rtttime == 0 || (int)bw < 0)

	bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;

	tp->snd_bandwidth = bw;

	/*
	 * Calculate the semi-static bandwidth delay product, plus two maximal
	 * segments.  The additional slop puts us squarely in the sweet
	 * spot and also handles the bandwidth run-up case and stabilization.
	 * Without the slop we could be locking ourselves into a lower
	 * bandwidth.
	 *
	 * Situations Handled:
	 *	(1) Prevents over-queueing of packets on LANs, especially on
	 *	    high speed LANs, allowing larger TCP buffers to be
	 *	    specified, and also does a good job preventing
	 *	    over-queueing of packets over choke points like modems
	 *	    (at least for the transmit side).
	 *
	 *	(2) Is able to handle changing network loads (bandwidth
	 *	    drops so bwnd drops, bandwidth increases so bwnd
	 *	    increases).
	 *
	 *	(3) Theoretically should stabilize in the face of multiple
	 *	    connections implementing the same algorithm (this may need
	 *
	 *	(4) Stability value (defaults to 20 = 2 maximal packets) can
	 *	    be adjusted with a sysctl but typically only needs
	 *	    adjusting on very slow connections.  A value no smaller
	 *	    than 5 should be used, but only reduce this default if
	 *	    you have
	 */
#define USERTT	((tp->t_srtt + tp->t_rttbest) / 2)
	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) + V_tcp_inflight_stab * tp->t_maxseg / 10;
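	/*
	 * Unit bookkeeping for the line above: t_srtt and t_rttbest are
	 * stored in ticks scaled up by TCP_RTT_SHIFT, so dividing by
	 * (hz << TCP_RTT_SHIFT) converts USERTT to seconds; the trailing
	 * term adds tcp_inflight_stab tenths of a segment (20 -> 2
	 * segments).
	 */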
	if (tcp_inflight_debug > 0) {

		if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {

			printf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",

	if ((long)bwnd < V_tcp_inflight_min)
		bwnd = V_tcp_inflight_min;
	if (bwnd > V_tcp_inflight_max)
		bwnd = V_tcp_inflight_max;
	if ((long)bwnd < tp->t_maxseg * 2)
		bwnd = tp->t_maxseg * 2;
	tp->snd_bwnd = bwnd;
#ifdef TCP_SIGNATURE
/*
 * Callback function invoked by m_apply() to digest TCP segment data
 * contained within an mbuf chain.
 */

tcp_signature_apply(void *fstate, void *data, u_int len)

	MD5Update(fstate, (u_char *)data, len);

/*
 * Compute TCP-MD5 hash of a TCPv4 segment. (RFC2385)
 *
 * Parameters:
 * m		pointer to head of mbuf chain
 * off0		offset to TCP header within the mbuf chain
 * len		length of TCP segment data, excluding options
 * optlen	length of TCP segment options
 * buf		pointer to storage for computed MD5 digest
 * direction	direction of flow (IPSEC_DIR_INBOUND or OUTBOUND)
 *
 * We do this over ip, tcphdr, segment data, and the key in the SADB.
 * When called from tcp_input(), we can be sure that th_sum has been
 * zeroed out and verified already.
 *
 * This function is for IPv4 use only. Calling this function with an
 * IPv6 packet in the mbuf chain will yield undefined results.
 *
 * Return 0 if successful, otherwise return -1.
 *
 * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
 * search with the destination IP address, and a 'magic SPI' to be
 * determined by the application. This is hardcoded elsewhere to 1179
 * right now. Another branch of this code exists which uses the SPD to
 * specify per-application flows but it is unstable.
 */
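/*
 * In short, the digest produced below is
 *
 *	MD5(IP pseudo-header || TCP header with th_sum = 0 ||
 *	    segment data || connection key from the SADB)
 *
 * matching the four MD5Update() steps in the body.
 */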
tcp_signature_compute(struct mbuf *m, int off0, int len, int optlen,
    u_char *buf, u_int direction)

	union sockaddr_union dst;
	struct ippseudo ippseudo;

	struct ipovly *ipovly;
	struct secasvar *sav;

	KASSERT(m != NULL, ("NULL mbuf chain"));
	KASSERT(buf != NULL, ("NULL signature pointer"));

	/* Extract the destination from the IP header in the mbuf. */
	ip = mtod(m, struct ip *);
	bzero(&dst, sizeof(union sockaddr_union));
	dst.sa.sa_len = sizeof(struct sockaddr_in);
	dst.sa.sa_family = AF_INET;
	dst.sin.sin_addr = (direction == IPSEC_DIR_INBOUND) ?
	    ip->ip_src : ip->ip_dst;

	/* Look up an SADB entry which matches the address of the peer. */
	sav = KEY_ALLOCSA(&dst, IPPROTO_TCP, htonl(TCP_SIG_SPI));

		printf("%s: SADB lookup failed for %s\n", __func__,
		    inet_ntoa(dst.sin.sin_addr));

	ipovly = (struct ipovly *)ip;
	th = (struct tcphdr *)((u_char *)ip + off0);
	doff = off0 + sizeof(struct tcphdr) + optlen;

	/*
	 * Step 1: Update MD5 hash with IP pseudo-header.
	 *
	 * XXX The ippseudo header MUST be digested in network byte order,
	 * or else we'll fail the regression test. Assume all fields we've
	 * been doing arithmetic on have been in host byte order.
	 * XXX One cannot depend on ipovly->ih_len here. When called from
	 * tcp_output(), the underlying ip_len member has not yet been set.
	 */
	ippseudo.ippseudo_src = ipovly->ih_src;
	ippseudo.ippseudo_dst = ipovly->ih_dst;
	ippseudo.ippseudo_pad = 0;
	ippseudo.ippseudo_p = IPPROTO_TCP;
	ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) + optlen);
	MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));

	/*
	 * Step 2: Update MD5 hash with TCP header, excluding options.
	 * The TCP checksum must be set to zero.
	 */
	savecsum = th->th_sum;

	MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
	th->th_sum = savecsum;

	/*
	 * Step 3: Update MD5 hash with TCP segment data.
	 * Use m_apply() to avoid an early m_pullup().
	 */

	m_apply(m, doff, len, tcp_signature_apply, &ctx);

	/*
	 * Step 4: Update MD5 hash with shared secret.
	 */
	MD5Update(&ctx, sav->key_auth->key_data, _KEYLEN(sav->key_auth));
	MD5Final(buf, &ctx);

	key_sa_recordxfer(sav, m);

#endif /* TCP_SIGNATURE */
sysctl_drop(SYSCTL_HANDLER_ARGS)

	/* addrs[0] is a foreign socket, addrs[1] is a local one. */
	struct sockaddr_storage addrs[2];

	struct sockaddr_in *fin, *lin;

	struct sockaddr_in6 *fin6, *lin6;
	struct in6_addr f6, l6;

	if (req->oldptr != NULL || req->oldlen != 0)

	if (req->newptr == NULL)

	if (req->newlen < sizeof(addrs))

	error = SYSCTL_IN(req, &addrs, sizeof(addrs));

	switch (addrs[0].ss_family) {

		fin6 = (struct sockaddr_in6 *)&addrs[0];
		lin6 = (struct sockaddr_in6 *)&addrs[1];
		if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
		    lin6->sin6_len != sizeof(struct sockaddr_in6))

		if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
			if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))

			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
			fin = (struct sockaddr_in *)&addrs[0];
			lin = (struct sockaddr_in *)&addrs[1];

			error = sa6_embedscope(fin6, V_ip6_use_defzone);

			error = sa6_embedscope(lin6, V_ip6_use_defzone);

		fin = (struct sockaddr_in *)&addrs[0];
		lin = (struct sockaddr_in *)&addrs[1];
		if (fin->sin_len != sizeof(struct sockaddr_in) ||
		    lin->sin_len != sizeof(struct sockaddr_in))

	INP_INFO_WLOCK(&V_tcbinfo);
	switch (addrs[0].ss_family) {

		inp = in6_pcblookup_hash(&V_tcbinfo, &f6, fin6->sin6_port,
		    &l6, lin6->sin6_port, 0, NULL);

		inp = in_pcblookup_hash(&V_tcbinfo, fin->sin_addr,
		    fin->sin_port, lin->sin_addr, lin->sin_port, 0, NULL);

		if (inp->inp_vflag & INP_TIMEWAIT) {
			/*
			 * XXXRW: There currently exists a state where an
			 * inpcb is present, but its timewait state has been
			 * discarded.  For now, don't allow dropping of this
			 * type of inpcb.
			 */

		} else if (!(inp->inp_vflag & INP_DROPPED) &&
		    !(inp->inp_socket->so_options & SO_ACCEPTCONN)) {
			tp = intotcpcb(inp);
			tp = tcp_drop(tp, ECONNABORTED);

	INP_INFO_WUNLOCK(&V_tcbinfo);

SYSCTL_PROC(_net_inet_tcp, TCPCTL_DROP, drop,
    CTLTYPE_STRUCT|CTLFLAG_WR|CTLFLAG_SKIP, NULL,
    0, sysctl_drop, "", "Drop TCP connection");
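/*
 * This handler backs the tcpdrop(8) utility.  A hypothetical invocation
 * (documentation addresses) dropping the connection from local
 * 192.0.2.1:50332 to foreign 192.0.2.2:80:
 *
 *	tcpdrop 192.0.2.1 50332 192.0.2.2 80
 */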
/*
 * Generate a standardized TCP log line for use throughout the
 * tcp subsystem.  Memory allocation is done with M_NOWAIT to
 * allow use in the interrupt context.
 *
 * NB: The caller MUST free(s, M_TCPLOG) the returned string.
 * NB: The function may return NULL if memory allocation failed.
 *
 * Due to header inclusion and ordering limitations the struct ip
 * and ip6_hdr pointers have to be passed as void pointers.
 */

tcp_log_addrs(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,

	const struct ip6_hdr *ip6;

	ip6 = (const struct ip6_hdr *)ip6hdr;

	ip = (struct ip *)ip4hdr;

	/*
	 * The log line looks like this:
	 * "TCP: [1.2.3.4]:50332 to [1.2.3.4]:80 tcpflags 0x2<SYN>"
	 */
	size = sizeof("TCP: []:12345 to []:12345 tcpflags 0x2<>") +
	    sizeof(PRINT_TH_FLAGS) + 1 +

	    2 * INET6_ADDRSTRLEN;

	    2 * INET_ADDRSTRLEN;

	/* Is logging enabled? */
	if (tcp_log_debug == 0 && tcp_log_in_vain == 0)

	s = malloc(size, M_TCPLOG, M_ZERO|M_NOWAIT);

	strcat(s, "TCP: [");

	if (inc && inc->inc_isipv6 == 0) {
		inet_ntoa_r(inc->inc_faddr, sp);

		sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));

		inet_ntoa_r(inc->inc_laddr, sp);

		sprintf(sp, "]:%i", ntohs(inc->inc_lport));

		ip6_sprintf(sp, &inc->inc6_faddr);

		sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));

		ip6_sprintf(sp, &inc->inc6_laddr);

		sprintf(sp, "]:%i", ntohs(inc->inc_lport));
	} else if (ip6 && th) {
		ip6_sprintf(sp, &ip6->ip6_src);

		sprintf(sp, "]:%i to [", ntohs(th->th_sport));

		ip6_sprintf(sp, &ip6->ip6_dst);

		sprintf(sp, "]:%i", ntohs(th->th_dport));

	} else if (ip && th) {
		inet_ntoa_r(ip->ip_src, sp);

		sprintf(sp, "]:%i to [", ntohs(th->th_sport));

		inet_ntoa_r(ip->ip_dst, sp);

		sprintf(sp, "]:%i", ntohs(th->th_dport));

	sprintf(sp, " tcpflags 0x%b", th->th_flags, PRINT_TH_FLAGS);
	if (*(s + size - 1) != '\0')
		panic("%s: string too long", __func__);