/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 */

#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_tcp_sack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_debug.h>
#include <netinet6/ip6protosw.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netkey/key.h>
#endif /* IPSEC */

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/xform.h>
#include <netipsec/ipsec6.h>
#include <netipsec/key.h>
#endif /*FAST_IPSEC*/

#include <machine/in_cksum.h>

int	tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

int	tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
    CTLFLAG_RW, &tcp_v6mssdflt, 0,
    "Default TCP Maximum Segment Size for IPv6");

/*
 * Minimum MSS we accept and use.  This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one.  The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending.  Set to zero to disable MINMSS
 * checking.  This setting prevents us from sending packets that are
 * too small.
 */
int	tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");

/*
 * Number of TCP segments per second we accept from a remote host
 * before we start to calculate the average segment size.  If the
 * average segment size drops below the minimum TCP MSS, we assume
 * a DoS attack and reset+drop the connection.  Take care not to set
 * this value too low, or interactive connections (telnet, SSH),
 * which send many small packets, may be killed.
 */
int	tcp_minmssoverload = TCP_MINMSSOVERLOAD;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmssoverload, CTLFLAG_RW,
    &tcp_minmssoverload, 0, "Number of TCP Segments per Second allowed to"
    " be under the MINMSS Size");

static int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");

int	tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

static int	tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

static int	do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
    &tcbinfo.ipi_count, 0, "Number of active PCBs");

static int	icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int	tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

/*
 * TCP bandwidth limiting sysctls.  Note that the default lower bound of
 * 1024 exists only for debugging.  A good production default would be
 * something like 6100.
 */
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, inflight, CTLFLAG_RW, 0,
    "TCP inflight data limiting");

static int	tcp_inflight_enable = 1;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int	tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int	tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower-bound for TCP inflight window");

static int	tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper-bound for TCP inflight window");

static int	tcp_inflight_stab = 20;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Inflight Algorithm Stabilization 20 = 2 packets");
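
/*
 * Note: the stab value is in tenths of a maximal segment.  The slop
 * added to the bandwidth delay product in tcp_xmit_bandwidth_limit()
 * below is tcp_inflight_stab * t_maxseg / 10, so the default of 20
 * contributes two maximal segments of slack.
 */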

uma_zone_t sack_hole_zone;

static struct inpcb *tcp_notify(struct inpcb *, int);
static void	tcp_discardcb(struct tcpcb *);
static void	tcp_isn_tick(void *);

/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE	512
#endif
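
/*
 * For example, since tcbhashsize is a read-only tunable (CTLFLAG_RDTUN),
 * it can be overridden at boot time from /boot/loader.conf; the value
 * should be a power of 2:
 *
 *	net.inet.tcp.tcbhashsize=2048
 */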

/*
 * Callouts should be moved into struct tcp directly.  They are currently
 * separate because the tcpcb structure is exported to userland for sysctl
 * parsing purposes, which do not know about callouts.
 */
struct tcpcb_mem {
	struct	tcpcb tcb;
	struct	callout tcpcb_mem_rexmt, tcpcb_mem_persist, tcpcb_mem_keep;
	struct	callout tcpcb_mem_2msl, tcpcb_mem_delack;
};

static uma_zone_t tcpcb_zone;
static uma_zone_t tcptw_zone;
struct callout isn_callout;

void
tcp_init()
{
	int hashsize = TCBHASHSIZE;

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	INP_INFO_LOCK_INIT(&tcbinfo, "tcp");
	tcbinfo.listhead = &tcb;
	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		printf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
	tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
	    &tcbinfo.porthashmask);
	tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcbinfo.ipi_zone, maxsockets);
#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR
	/*
	 * These have to be type stable for the benefit of the timers.
	 */
	tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcpcb_zone, maxsockets);
	tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcptw_zone, maxsockets / 5);
	callout_init(&isn_callout, CALLOUT_MPSAFE);
	tcp_isn_tick(NULL);
	EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
	    SHUTDOWN_PRI_DEFAULT);
	sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

void
tcp_fini(xtp)
	void *xtp;
{

	callout_stop(&isn_callout);
}

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcpip_fillheaders(inp, ip_ptr, tcp_ptr)
	struct inpcb *inp;
	void *ip_ptr;
	void *tcp_ptr;
{
	struct tcphdr *th = (struct tcphdr *)tcp_ptr;

	INP_LOCK_ASSERT(inp);

	if ((inp->inp_vflag & INP_IPV6) != 0) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
		    (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
		    (IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
	} else {
		struct ip *ip;

		ip = (struct ip *)ip_ptr;
		ip->ip_v = IPVERSION;
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_ttl = inp->inp_ip_ttl;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
	}
	th->th_sport = inp->inp_lport;
	th->th_dport = inp->inp_fport;
	th->th_sum = 0;		/* in_pseudo() is called later for ipv4 */
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcpip_maketemplate(inp)
	struct inpcb *inp;
{
	struct mbuf *m;
	struct tcptemp *n;

	m = m_get(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (0);
	m->m_len = sizeof(struct tcptemp);
	n = mtod(m, struct tcptemp *);

	tcpip_fillheaders(inp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
	return (n);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(tp, ipgen, th, m, ack, seq, flags)
	struct tcpcb *tp;
	void *ipgen;
	register struct tcphdr *th;
	register struct mbuf *m;
	tcp_seq ack, seq;
	int flags;
{
	register int tlen;
	int win = 0;
	struct ip *ip;
	struct tcphdr *nth;
	struct ip6_hdr *ip6;
	int isipv6;
	int ipflags = 0;
	struct inpcb *inp;

	KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));

	isipv6 = ((struct ip *)ipgen)->ip_v == 6;
	ip6 = ipgen;
	ip = ipgen;

	if (tp != NULL) {
		inp = tp->t_inpcb;
		KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
		INP_INFO_WLOCK_ASSERT(&tcbinfo);
		INP_LOCK_ASSERT(inp);
	} else
		inp = NULL;

	if (tp != NULL) {
		if (!(flags & TH_RST)) {
			win = sbspace(&inp->inp_socket->so_rcv);
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
	}
	if (m == NULL) {
		m = m_gethdr(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
		if (isipv6) {
			bcopy((caddr_t)ip6, mtod(m, caddr_t),
			    sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
	if (isipv6) {
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
		    tlen));
		tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	} else {
		tlen += sizeof (struct tcpiphdr);
		ip->ip_ttl = ip_defttl;
		if (path_mtu_discovery)
			ip->ip_off |= IP_DF;
	}
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
#ifdef MAC
	if (inp != NULL) {
		/*
		 * Packet is associated with a socket, so allow the
		 * label of the response to reflect the socket label.
		 */
		INP_LOCK_ASSERT(inp);
		mac_create_mbuf_from_inpcb(inp, m);
	} else {
		/*
		 * Packet is not associated with a socket, so possibly
		 * update the label in place.
		 */
		mac_reflect_mbuf_tcp(m);
	}
#endif
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_off = sizeof (struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp != NULL)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	if (isipv6) {
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr),
		    tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb :
		    NULL, NULL);
	} else {
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
	if (isipv6)
		(void) ip6_output(m, NULL, NULL, ipflags, NULL, NULL, inp);
	else
		(void) ip_output(m, NULL, NULL, ipflags, NULL, inp);
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(inp)
	struct inpcb *inp;
{
	struct tcpcb_mem *tm;
	struct tcpcb *tp;
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;

	tm = uma_zalloc(tcpcb_zone, M_NOWAIT | M_ZERO);
	if (tm == NULL)
		return (NULL);
	tp = &tm->tcb;
	/* LIST_INIT(&tp->t_segq); */	/* XXX covered by M_ZERO */
	tp->t_maxseg = tp->t_maxopd =
	    isipv6 ? tcp_v6mssdflt : tcp_mssdflt;

	/* Set up our timeouts. */
	callout_init(tp->tt_rexmt = &tm->tcpcb_mem_rexmt, NET_CALLOUT_MPSAFE);
	callout_init(tp->tt_persist = &tm->tcpcb_mem_persist, NET_CALLOUT_MPSAFE);
	callout_init(tp->tt_keep = &tm->tcpcb_mem_keep, NET_CALLOUT_MPSAFE);
	callout_init(tp->tt_2msl = &tm->tcpcb_mem_2msl, NET_CALLOUT_MPSAFE);
	callout_init(tp->tt_delack = &tm->tcpcb_mem_delack, NET_CALLOUT_MPSAFE);

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
	tp->sack_enable = tcp_do_sack;
	TAILQ_INIT(&tp->snd_holes);
	tp->t_inpcb = inp;	/* XXX */
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = ticks;
	tp->t_bw_rtttime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = (caddr_t)tp;
	return (tp);		/* XXX */
}

/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(tp, errno)
	register struct tcpcb *tp;
	int errno;
{
	struct socket *so = tp->t_inpcb->inp_socket;

	INP_LOCK_ASSERT(tp->t_inpcb);
	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

static void
tcp_discardcb(tp)
	struct tcpcb *tp;
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;

	INP_LOCK_ASSERT(inp);

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.
	 */
	callout_stop(tp->tt_rexmt);
	callout_stop(tp->tt_persist);
	callout_stop(tp->tt_keep);
	callout_stop(tp->tt_2msl);
	callout_stop(tp->tt_delack);

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 4 rtt samples.
	 * 4 samples is enough for the srtt filter to converge
	 * to within enough % of the correct value; fewer samples
	 * and we could save a bogus rtt.  The danger is not high
	 * as tcp quickly recovers from everything.
	 * XXX: Works very well but needs some more statistics!
	 */
	if (tp->t_rttupdated >= 4) {
		struct hc_metrics_lite metrics;
		u_long ssthresh;

		bzero(&metrics, sizeof(metrics));
		/*
		 * Update the ssthresh always when the conditions below
		 * are satisfied.  This gives us a better new start value
		 * for the congestion avoidance for new connections.
		 * ssthresh is only set if packet loss occurred on a session.
		 */
		ssthresh = tp->snd_ssthresh;
		if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
			if (ssthresh < 2)
				ssthresh = 2;
			ssthresh *= (u_long)(tp->t_maxseg +
			    (isipv6 ? sizeof (struct ip6_hdr) +
			    sizeof (struct tcphdr) : sizeof (struct tcpiphdr)));
		} else
			ssthresh = 0;
		metrics.rmx_ssthresh = ssthresh;

		metrics.rmx_rtt = tp->t_srtt;
		metrics.rmx_rttvar = tp->t_rttvar;
		/* XXX: This wraps if the pipe is more than 4 Gbit per second */
		metrics.rmx_bandwidth = tp->snd_bandwidth;
		metrics.rmx_cwnd = tp->snd_cwnd;
		metrics.rmx_sendpipe = 0;
		metrics.rmx_recvpipe = 0;

		tcp_hc_update(&inp->inp_inc, &metrics);
	}

	/* free the reassembly queue, if any */
	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		uma_zfree(tcp_reass_zone, q);
	}
	tcp_free_sackholes(tp);
	inp->inp_ppcb = NULL;
	tp->t_inpcb = NULL;
	uma_zfree(tcpcb_zone, tp);
	soisdisconnected(so);
}

/*
 * Close a TCP control block:
 *    discard all space held by the tcp
 *    discard internet protocol block
 *    wake up any sleepers
 */
struct tcpcb *
tcp_close(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;

	INP_LOCK_ASSERT(inp);

	tcp_discardcb(tp);
	if (INP_CHECK_SOCKAF(so, AF_INET6))
		in6_pcbdetach(inp);
	else
		in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	return (NULL);
}

void
tcp_drain()
{
	if (do_tcpdrain) {
		struct inpcb *inpb;
		struct tcpcb *tcpb;
		struct tseg_qent *te;

		/*
		 * Walk the tcpbs, if existing, and flush the reassembly queue,
		 * if there is one.
		 * XXX: The "Net/3" implementation doesn't imply that the TCP
		 * reassembly queue should be flushed, but in a situation
		 * where we're really low on mbufs, this is potentially
		 * useful.
		 */
		INP_INFO_RLOCK(&tcbinfo);
		LIST_FOREACH(inpb, tcbinfo.listhead, inp_list) {
			if (inpb->inp_vflag & INP_TIMEWAIT)
				continue;
			if ((tcpb = intotcpcb(inpb)) != NULL) {
				while ((te = LIST_FIRST(&tcpb->t_segq))
				    != NULL) {
					LIST_REMOVE(te, tqe_q);
					m_freem(te->tqe_m);
					uma_zfree(tcp_reass_zone, te);
				}
				tcp_clean_sackreport(tcpb);
			}
		}
		INP_INFO_RUNLOCK(&tcbinfo);
	}
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static struct inpcb *
tcp_notify(inp, error)
	struct inpcb *inp;
	int error;
{
	struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;

	INP_LOCK_ASSERT(inp);

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	    error == EHOSTDOWN)) {
		return (inp);
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror) {
		tcp_drop(tp, error);
		return (struct inpcb *)0;
	} else {
		tp->t_softerror = error;
		return (inp);
	}
#if 0
	wakeup( &so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		n = tcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n/8) * sizeof(struct xtcpcb);
		return (0);
	}

	if (req->newptr != NULL)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_RLOCK(&tcbinfo);
	gencnt = tcbinfo.ipi_gencnt;
	n = tcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&tcbinfo);

	error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
	    + n * sizeof(struct xtcpcb));
	if (error != 0)
		return (error);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == NULL)
		return (ENOMEM);

	INP_INFO_RLOCK(&tcbinfo);
	for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp != NULL && i < n;
	    inp = LIST_NEXT(inp, inp_list)) {
		if (inp->inp_gencnt <= gencnt) {
			/*
			 * XXX: This use of cr_cansee(), introduced with
			 * TCP state changes, is not quite right, but for
			 * now, better than nothing.
			 */
			if (inp->inp_vflag & INP_TIMEWAIT)
				error = cr_cansee(req->td->td_ucred,
				    intotw(inp)->tw_cred);
			else
				error = cr_canseesocket(req->td->td_ucred,
				    inp->inp_socket);
			if (error == 0)
				inp_list[i++] = inp;
		}
	}
	INP_INFO_RUNLOCK(&tcbinfo);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt) {
			struct xtcpcb xt;
			caddr_t inp_ppcb;

			xt.xt_len = sizeof xt;
			/* XXX should avoid extra copy */
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb == NULL)
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
			else if (inp->inp_vflag & INP_TIMEWAIT) {
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
				xt.xt_tp.t_state = TCPS_TIME_WAIT;
			} else
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket != NULL)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			else {
				bzero(&xt.xt_socket, sizeof xt.xt_socket);
				xt.xt_socket.xso_protocol = IPPROTO_TCP;
			}
			xt.xt_inp.inp_gencnt = inp->inp_gencnt;
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		INP_INFO_RLOCK(&tcbinfo);
		xig.xig_gen = tcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = tcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&tcbinfo);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");

static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int error;

	error = suser_cred(req->td->td_ucred, SUSER_ALLOWJAIL);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	INP_INFO_RLOCK(&tcbinfo);
	inp = in_pcblookup_hash(&tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
	    addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
	if (error)
		goto out;
	cru2x(inp->inp_socket->so_cred, &xuc);
out:
	INP_INFO_RUNLOCK(&tcbinfo);
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp_getcred, "S,xucred", "Get the xucred of a TCP connection");

static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in6 addrs[2];
	struct in6_addr a6[2];
	struct inpcb *inp;
	int error, mapped = 0;

	error = suser_cred(req->td->td_ucred, SUSER_ALLOWJAIL);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = 1;
		else
			return (EINVAL);
	}
	error = in6_embedscope(&a6[0], &addrs[0], NULL, NULL);
	if (error)
		return (error);
	error = in6_embedscope(&a6[1], &addrs[1], NULL, NULL);
	if (error)
		return (error);
	INP_INFO_RLOCK(&tcbinfo);
	if (mapped == 1)
		inp = in_pcblookup_hash(&tcbinfo,
		    *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
		    addrs[1].sin6_port,
		    *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
		    addrs[0].sin6_port, 0, NULL);
	else
		inp = in6_pcblookup_hash(&tcbinfo, &a6[1], addrs[1].sin6_port,
		    &a6[0], addrs[0].sin6_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
	if (error)
		goto out;
	cru2x(inp->inp_socket->so_cred, &xuc);
out:
	INP_INFO_RUNLOCK(&tcbinfo);
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection");

void
tcp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct ip *ip = vip;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	struct icmp *icp;
	struct in_conninfo inc;
	tcp_seq icmp_tcp_seq;
	int mtu;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
	    cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
		notify = tcp_drop_syn_sent;
	/*
	 * Redirects don't need to be handled up here.
	 */
	else if (PRC_IS_REDIRECT(cmd))
		return;
	/*
	 * Source quench is deprecated.
	 */
	else if (cmd == PRC_QUENCH)
		return;
	/*
	 * Hostdead is ugly because it goes linearly through all PCBs.
	 * XXX: We never get this from ICMP, otherwise it makes an
	 * excellent DoS attack on machines with many connections.
	 */
	else if (cmd == PRC_HOSTDEAD)
		ip = NULL;
	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
		return;
	if (ip != NULL) {
		icp = (struct icmp *)((caddr_t)ip
		    - offsetof(struct icmp, icmp_ip));
		th = (struct tcphdr *)((caddr_t)ip
		    + (ip->ip_hl << 2));
		INP_INFO_WLOCK(&tcbinfo);
		inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
		    ip->ip_src, th->th_sport, 0, NULL);
		if (inp != NULL) {
			INP_LOCK(inp);
			if (inp->inp_socket != NULL) {
				icmp_tcp_seq = htonl(th->th_seq);
				tp = intotcpcb(inp);
				if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
				    SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
					if (cmd == PRC_MSGSIZE) {
						/*
						 * If we got a needfrag set the MTU
						 * in the route to the suggested new
						 * value (if given) and then notify.
						 * If no new MTU was suggested, then
						 * we guess a new one less than the
						 * current value.
						 * If the new MTU is unreasonably
						 * small (defined by sysctl tcp_minmss),
						 * then we up the MTU value to minimum.
						 */
						bzero(&inc, sizeof(inc));
						inc.inc_flags = 0;	/* IPv4 */
						inc.inc_faddr = faddr;

						mtu = ntohs(icp->icmp_nextmtu);
						if (!mtu)
							mtu = ip_next_mtu(mtu, 1);
						if (mtu >= max(296, (tcp_minmss
						    + sizeof(struct tcpiphdr))))
							tcp_hc_updatemtu(&inc, mtu);
					}
					inp = (*notify)(inp, inetctlerrmap[cmd]);
				}
			}
			if (inp != NULL)
				INP_UNLOCK(inp);
		} else {
			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
			syncache_unreach(&inc, th);
		}
		INP_INFO_WUNLOCK(&tcbinfo);
	} else
		in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
}

void
tcp6_ctlinput(cmd, sa, d)
	int cmd;
	struct sockaddr *sa;
	void *d;
{
	struct tcphdr th;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;

	if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (!PRC_IS_REDIRECT(cmd) &&
	    ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
		return;
	/* Source quench is deprecated. */
	else if (cmd == PRC_QUENCH)
		return;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6 != NULL) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when IPV6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(*thp))
			return;

		bzero(&th, sizeof(th));
		m_copydata(m, off, sizeof(*thp), (caddr_t)&th);

		in6_pcbnotify(&tcbinfo, sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, NULL, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		INP_INFO_WLOCK(&tcbinfo);
		syncache_unreach(&inc, &th);
		INP_INFO_WUNLOCK(&tcbinfo);
	} else
		in6_pcbnotify(&tcbinfo, sa, 0, (const struct sockaddr *)sa6_src,
		    0, cmd, NULL, notify);
}

/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used, with only small modifications.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * As reading the *exact* system time is too expensive to be done
 * whenever setting up a TCP connection, we increment the time
 * offset in two ways.  First, a small random positive increment
 * is added to isn_offset for each connection that is set up.
 * Second, the function tcp_isn_tick fires once per clock tick
 * and increments isn_offset as necessary so that sequence numbers
 * are incremented at approximately ISN_BYTES_PER_SECOND.  The
 * random positive increments serve only to ensure that the same
 * exact sequence number is never sent out twice (as could otherwise
 * happen when a port is recycled in less than the system tick
 * interval.)
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 *
 * Locking of the global variables isn_secret, isn_last_reseed, isn_offset,
 * isn_offset_old, and isn_ctx is performed using the TCP pcbinfo lock.  In
 * general, this means holding an exclusive (write) lock.
 */

#define ISN_BYTES_PER_SECOND 1048576
#define ISN_STATIC_INCREMENT 4096
#define ISN_RANDOM_INCREMENT (4096 - 1)
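
/*
 * Worked example: at ISN_BYTES_PER_SECOND (1 Mbyte/s) the 32-bit ISN
 * space wraps in 2^32 / 1048576 = 4096 seconds, roughly 68 minutes,
 * which is the "over an hour" of headroom mentioned above.  The
 * tcp_isn_tick() callout below fires 100 times per second, so each
 * tick projects the offset forward by ISN_BYTES_PER_SECOND / 100 =
 * 10485 sequence numbers, on top of the per-connection random
 * increments.
 */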

static u_char isn_secret[32];
static int isn_last_reseed;
static u_int32_t isn_offset, isn_offset_old;
static MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(tp)
	struct tcpcb *tp;
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	INP_LOCK_ASSERT(tp->t_inpcb);

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
	    < (u_int)ticks))) {
		read_random(&isn_secret, sizeof(isn_secret));
		isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
		    sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
		    sizeof(struct in6_addr));
	} else {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
		    sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
		    sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	isn_offset += ISN_STATIC_INCREMENT +
	    (arc4random() & ISN_RANDOM_INCREMENT);
	new_isn += isn_offset;
	return (new_isn);
}

/*
 * Increment the offset to the next ISN_BYTES_PER_SECOND / hz boundary
 * to keep time flowing at a relatively constant rate.  If the random
 * increments have already pushed us past the projected offset, do nothing.
 */
static void
tcp_isn_tick(xtp)
	void *xtp;
{
	u_int32_t projected_offset;

	INP_INFO_WLOCK(&tcbinfo);
	projected_offset = isn_offset_old + ISN_BYTES_PER_SECOND / 100;
	if (projected_offset > isn_offset)
		isn_offset = projected_offset;
	isn_offset_old = isn_offset;
	callout_reset(&isn_callout, hz/100, tcp_isn_tick, NULL);
	INP_INFO_WUNLOCK(&tcbinfo);
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
struct inpcb *
tcp_drop_syn_sent(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	INP_LOCK_ASSERT(inp);
	if (tp != NULL && tp->t_state == TCPS_SYN_SENT) {
		tcp_drop(tp, errno);
		return (NULL);
	}
	return (inp);
}

/*
 * When `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
struct inpcb *
tcp_mtudisc(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);
	struct socket *so = inp->inp_socket;
	u_int maxmtu;
	u_int romtu;
	int mss;
	int isipv6;

	INP_LOCK_ASSERT(inp);
	if (tp != NULL) {
		isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
		maxmtu = tcp_hc_getmtu(&inp->inp_inc); /* IPv4 and IPv6 */
		romtu = isipv6 ? tcp_maxmtu6(&inp->inp_inc) :
		    tcp_maxmtu(&inp->inp_inc);
		if (!maxmtu)
			maxmtu = romtu;
		else
			maxmtu = min(maxmtu, romtu);
		if (!maxmtu) {
			tp->t_maxopd = tp->t_maxseg =
			    isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
			return (inp);
		}
		mss = maxmtu - (isipv6 ?
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		    sizeof(struct tcpiphdr));

		/*
		 * XXX - The above conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (tp->t_maxopd <= mss)
			return (inp);
		tp->t_maxopd = mss;

		if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
			mss -= TCPOLEN_TSTAMP_APPA;
#if (MCLBYTES & (MCLBYTES - 1)) == 0
		if (mss > MCLBYTES)
			mss &= ~(MCLBYTES-1);
#else
		if (mss > MCLBYTES)
			mss = mss / MCLBYTES * MCLBYTES;
#endif
		if (so->so_snd.sb_hiwat < mss)
			mss = so->so_snd.sb_hiwat;

		tp->t_maxseg = mss;

		tcpstat.tcps_mturesent++;
		tp->snd_nxt = tp->snd_una;
		tcp_output(tp);
	}
	return (inp);
}

/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
u_long
tcp_maxmtu(inc)
	struct in_conninfo *inc;
{
	struct route sro;
	struct sockaddr_in *dst;
	struct ifnet *ifp;
	u_long maxmtu = 0;

	KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));

	bzero(&sro, sizeof(sro));
	if (inc->inc_faddr.s_addr != INADDR_ANY) {
		dst = (struct sockaddr_in *)&sro.ro_dst;
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = inc->inc_faddr;
		rtalloc_ign(&sro, RTF_CLONING);
	}
	if (sro.ro_rt != NULL) {
		ifp = sro.ro_rt->rt_ifp;
		if (sro.ro_rt->rt_rmx.rmx_mtu == 0)
			maxmtu = ifp->if_mtu;
		else
			maxmtu = min(sro.ro_rt->rt_rmx.rmx_mtu, ifp->if_mtu);
		RTFREE(sro.ro_rt);
	}
	return (maxmtu);
}

u_long
tcp_maxmtu6(inc)
	struct in_conninfo *inc;
{
	struct route_in6 sro6;
	struct ifnet *ifp;
	u_long maxmtu = 0;

	KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));

	bzero(&sro6, sizeof(sro6));
	if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
		sro6.ro_dst.sin6_family = AF_INET6;
		sro6.ro_dst.sin6_len = sizeof(struct sockaddr_in6);
		sro6.ro_dst.sin6_addr = inc->inc6_faddr;
		rtalloc_ign((struct route *)&sro6, RTF_CLONING);
	}
	if (sro6.ro_rt != NULL) {
		ifp = sro6.ro_rt->rt_ifp;
		if (sro6.ro_rt->rt_rmx.rmx_mtu == 0)
			maxmtu = IN6_LINKMTU(sro6.ro_rt->rt_ifp);
		else
			maxmtu = min(sro6.ro_rt->rt_rmx.rmx_mtu,
			    IN6_LINKMTU(sro6.ro_rt->rt_ifp));
		RTFREE(sro6.ro_rt);
	}
	return (maxmtu);
}

#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return (0);
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (!m)
		return (0);

	if ((inp->inp_vflag & INP_IPV6) != 0) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcpip_fillheaders(inp, ip6, th);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else {
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcpip_fillheaders(inp, ip, th);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}

	m_free(m);
	return (hdrsiz);
}
#endif /* IPSEC */

/*
 * Move a TCP connection into TIME_WAIT state.
 *    tcbinfo is locked.
 *    inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(tp)
	struct tcpcb *tp;
{
	struct tcptw *tw;
	struct inpcb *inp;
	int tw_time, acknow;
	struct socket *so;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);	/* tcp_timer_2msl_reset(). */
	INP_LOCK_ASSERT(tp->t_inpcb);

	tw = uma_zalloc(tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		tw = tcp_timer_2msl_tw(1);
		if (tw == NULL) {
			tcp_close(tp);
			return;
		}
	}
	inp = tp->t_inpcb;
	tw->tw_inpcb = inp;

	/*
	 * Recover last window size sent.
	 */
	tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;

	/*
	 * Set t_recent if timestamps are used on the connection.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP))
		tw->t_recent = tp->ts_recent;
	else
		tw->t_recent = 0;

	tw->snd_nxt = tp->snd_nxt;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->t_starttime = tp->t_starttime;

	/* XXX
	 * If this code will
	 * be used for fin-wait-2 state also, then we may need
	 * a ts_recent from the last segment.
	 */
	tw_time = 2 * tcp_msl;
	acknow = tp->t_flags & TF_ACKNOW;
	tcp_discardcb(tp);
	so = inp->inp_socket;
	tw->tw_cred = crhold(so->so_cred);
	tw->tw_so_options = so->so_options;
	inp->inp_socket = NULL;
	if (acknow)
		tcp_twrespond(tw, TH_ACK);
	inp->inp_ppcb = (caddr_t)tw;
	inp->inp_vflag |= INP_TIMEWAIT;
	tcp_timer_2msl_reset(tw, tw_time);
	INP_UNLOCK(inp);
}

/*
 * The approximate rate of ISN increase of Microsoft TCP stacks;
 * the actual rate is slightly higher due to the addition of
 * random positive increments.
 *
 * Most other new OSes use semi-randomized ISN values, so we
 * do not need to worry about them.
 */
#define MS_ISN_BYTES_PER_SECOND		250000

/*
 * Determine if the ISN we will generate has advanced beyond the last
 * sequence number used by the previous connection.  If so, indicate
 * that it is safe to recycle this tw socket by returning 1.
 *
 * XXXRW: This function should assert the inpcb lock as it does multiple
 * non-atomic reads from the tcptw, but is currently called without it from
 * in_pcb.c:in_pcblookup_local().
 */
int
tcp_twrecycleable(struct tcptw *tw)
{
	tcp_seq new_iss = tw->iss;
	tcp_seq new_irs = tw->irs;

	new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
	new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);

	if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
		return (1);
	else
		return (0);
}

void
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct inpcb *inp = tw->tw_inpcb;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);	/* tcp_timer_2msl_stop(). */
	INP_LOCK_ASSERT(inp);

	tw->tw_inpcb = NULL;
	tcp_timer_2msl_stop(tw);
	inp->inp_ppcb = NULL;
	if (inp->inp_vflag & INP_IPV6PROTO)
		in6_pcbdetach(inp);
	else
		in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	crfree(tw->tw_cred);
	tw->tw_cred = NULL;
	if (reuse)
		return;
	uma_zfree(tcptw_zone, tw);
}

int
tcp_twrespond(struct tcptw *tw, int flags)
{
	struct inpcb *inp = tw->tw_inpcb;
	struct tcphdr *th;
	struct mbuf *m;
	struct ip *ip = NULL;
	u_int hdrlen, optlen;
	int error;
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_isipv6;
	u_int8_t *optp;

	INP_LOCK_ASSERT(inp);

	m = m_gethdr(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef MAC
	mac_create_mbuf_from_inpcb(inp, m);
#endif

	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, ip6, th);
	} else {
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, ip, th);
	}
	optp = (u_int8_t *)(th + 1);

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYN's and this is not a RST.
	 */
	if (tw->t_recent && flags == TH_ACK) {
		u_int32_t *lp = (u_int32_t *)optp;

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(ticks);
		*lp   = htonl(tw->t_recent);
		optp += TCPOLEN_TSTAMP_APPA;
	}

	optlen = optp - (u_int8_t *)(th + 1);

	m->m_len = hdrlen + optlen;
	m->m_pkthdr.len = m->m_len;

	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

	th->th_seq = htonl(tw->snd_nxt);
	th->th_ack = htonl(tw->rcv_nxt);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons(tw->last_win);

	if (isipv6) {
		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
		    sizeof(struct tcphdr) + optlen);
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
		error = ip6_output(m, inp->in6p_outputopts, NULL,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
	} else {
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		ip->ip_len = m->m_pkthdr.len;
		if (path_mtu_discovery)
			ip->ip_off |= IP_DF;
		error = ip_output(m, inp->inp_options, NULL,
		    ((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, inp);
	}
	if (flags & TH_ACK)
		tcpstat.tcps_sndacks++;
	else
		tcpstat.tcps_sndctrl++;
	tcpstat.tcps_sndtotal++;
	return (error);
}

/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be) TCP/Vegas.  The code operates on the
 * transmitter side of a TCP connection and so only affects the transmit
 * side of the connection.
 *
 * BACKGROUND:  TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad-infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this:  First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad-infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
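
/*
 * Worked example of the bwnd computation below: with a long term
 * bandwidth average of 125000 bytes/sec (a 1 Mbit/s path), a USERTT
 * of 50 ms, and the default tcp_inflight_stab of 20 (two 1460-byte
 * segments of slop), bwnd = 125000 * 0.050 + 2 * 1460 = 9170 bytes,
 * i.e. roughly six segments allowed in flight.
 */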
void
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
{
	u_long bw;
	u_long bwnd;
	int save_ticks;

	INP_LOCK_ASSERT(tp->t_inpcb);

	/*
	 * If inflight_enable is disabled in the middle of a tcp connection,
	 * make sure snd_bwnd is effectively disabled.
	 */
	if (tcp_inflight_enable == 0) {
		tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
		tp->snd_bandwidth = 0;
		return;
	}

	/*
	 * Figure out the bandwidth.  Due to the tick granularity this
	 * is a very rough number and it MUST be averaged over a fairly
	 * long period of time.  XXX we need to take into account a link
	 * that is not using all available bandwidth, but for now our
	 * slop will ramp us up if this case occurs and the bandwidth later
	 * increases.
	 *
	 * Note: if ticks rollover 'bw' may wind up negative.  We must
	 * effectively reset t_bw_rtttime for this case.
	 */
	save_ticks = ticks;
	if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)
		return;

	bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
	    (save_ticks - tp->t_bw_rtttime);
	tp->t_bw_rtttime = save_ticks;
	tp->t_bw_rtseq = ack_seq;
	if (tp->t_bw_rtttime == 0 || (int)bw < 0)
		return;
	bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;

	tp->snd_bandwidth = bw;

	/*
	 * Calculate the semi-static bandwidth delay product, plus two maximal
	 * segments.  The additional slop puts us squarely in the sweet
	 * spot and also handles the bandwidth run-up case and stabilization.
	 * Without the slop we could be locking ourselves into a lower
	 * bandwidth.
	 *
	 * Situations Handled:
	 *	(1) Prevents over-queueing of packets on LANs, especially on
	 *	    high speed LANs, allowing larger TCP buffers to be
	 *	    specified, and also does a good job preventing
	 *	    over-queueing of packets over choke points like modems
	 *	    (at least for the transmit side).
	 *
	 *	(2) Is able to handle changing network loads (bandwidth
	 *	    drops so bwnd drops, bandwidth increases so bwnd
	 *	    increases).
	 *
	 *	(3) Theoretically should stabilize in the face of multiple
	 *	    connections implementing the same algorithm (this may need
	 *	    a little work).
	 *
	 *	(4) Stability value (defaults to 20 = 2 maximal packets) can
	 *	    be adjusted with a sysctl but typically only needs to be
	 *	    adjusted on very slow connections.  A value no smaller
	 *	    than 5 should be used, but only reduce this default if
	 *	    you have no other choice.
	 */
#define USERTT	((tp->t_srtt + tp->t_rttbest) / 2)
	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
	    tcp_inflight_stab * tp->t_maxseg / 10;

	if (tcp_inflight_debug > 0) {
		static int ltime;
		if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
			ltime = ticks;
			printf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
			    tp, bw, tp->t_rttbest, tp->t_srtt, bwnd);
		}
	}
	if ((long)bwnd < tcp_inflight_min)
		bwnd = tcp_inflight_min;
	if (bwnd > tcp_inflight_max)
		bwnd = tcp_inflight_max;
	if ((long)bwnd < tp->t_maxseg * 2)
		bwnd = tp->t_maxseg * 2;
	tp->snd_bwnd = bwnd;
}

#ifdef TCP_SIGNATURE
/*
 * Callback function invoked by m_apply() to digest TCP segment data
 * contained within an mbuf chain.
 */
static int
tcp_signature_apply(void *fstate, void *data, u_int len)
{

	MD5Update(fstate, (u_char *)data, len);
	return (0);
}

/*
 * Compute TCP-MD5 hash of a TCPv4 segment. (RFC2385)
 *
 * Parameters:
 * m		pointer to head of mbuf chain
 * off0		offset to TCP header within the mbuf chain
 * len		length of TCP segment data, excluding options
 * optlen	length of TCP segment options
 * buf		pointer to storage for computed MD5 digest
 * direction	direction of flow (IPSEC_DIR_INBOUND or OUTBOUND)
 *
 * We do this over ip, tcphdr, segment data, and the key in the SADB.
 * When called from tcp_input(), we can be sure that th_sum has been
 * zeroed out and verified already.
 *
 * This function is for IPv4 use only.  Calling this function with an
 * IPv6 packet in the mbuf chain will yield undefined results.
 *
 * Return 0 if successful, otherwise return -1.
 *
 * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
 * search with the destination IP address, and a 'magic SPI' to be
 * determined by the application.  This is hardcoded elsewhere to 1179
 * right now.  Another branch of this code exists which uses the SPD to
 * specify per-application flows but it is unstable.
 */
int
tcp_signature_compute(struct mbuf *m, int off0, int len, int optlen,
    u_char *buf, u_int direction)
{
	union sockaddr_union dst;
	struct ippseudo ippseudo;
	MD5_CTX ctx;
	int doff;
	struct ip *ip;
	struct ipovly *ipovly;
	struct secasvar *sav;
	struct tcphdr *th;
	u_short savecsum;

	KASSERT(m != NULL, ("NULL mbuf chain"));
	KASSERT(buf != NULL, ("NULL signature pointer"));

	/* Extract the destination from the IP header in the mbuf. */
	ip = mtod(m, struct ip *);
	bzero(&dst, sizeof(union sockaddr_union));
	dst.sa.sa_len = sizeof(struct sockaddr_in);
	dst.sa.sa_family = AF_INET;
	dst.sin.sin_addr = (direction == IPSEC_DIR_INBOUND) ?
	    ip->ip_src : ip->ip_dst;

	/* Look up an SADB entry which matches the address of the peer. */
	sav = KEY_ALLOCSA(&dst, IPPROTO_TCP, htonl(TCP_SIG_SPI));
	if (sav == NULL) {
		printf("%s: SADB lookup failed for %s\n", __func__,
		    inet_ntoa(dst.sin.sin_addr));
		return (EINVAL);
	}

	MD5Init(&ctx);
	ipovly = (struct ipovly *)ip;
	th = (struct tcphdr *)((u_char *)ip + off0);
	doff = off0 + sizeof(struct tcphdr) + optlen;

	/*
	 * Step 1: Update MD5 hash with IP pseudo-header.
	 *
	 * XXX The ippseudo header MUST be digested in network byte order,
	 * or else we'll fail the regression test.  Assume all fields we've
	 * been doing arithmetic on have been in host byte order.
	 * XXX One cannot depend on ipovly->ih_len here.  When called from
	 * tcp_output(), the underlying ip_len member has not yet been set.
	 */
	ippseudo.ippseudo_src = ipovly->ih_src;
	ippseudo.ippseudo_dst = ipovly->ih_dst;
	ippseudo.ippseudo_pad = 0;
	ippseudo.ippseudo_p = IPPROTO_TCP;
	ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) + optlen);
	MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));

	/*
	 * Step 2: Update MD5 hash with TCP header, excluding options.
	 * The TCP checksum must be set to zero.
	 */
	savecsum = th->th_sum;
	th->th_sum = 0;
	MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
	th->th_sum = savecsum;

	/*
	 * Step 3: Update MD5 hash with TCP segment data.
	 *         Use m_apply() to avoid an early m_pullup().
	 */
	if (len > 0)
		m_apply(m, doff, len, tcp_signature_apply, &ctx);

	/*
	 * Step 4: Update MD5 hash with shared secret.
	 */
	MD5Update(&ctx, _KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth));
	MD5Final(buf, &ctx);

	key_sa_recordxfer(sav, m);
	KEY_FREESAV(&sav);
	return (0);
}
#endif /* TCP_SIGNATURE */

static int
sysctl_drop(SYSCTL_HANDLER_ARGS)
{
	/* addrs[0] is a foreign socket, addrs[1] is a local one. */
	struct sockaddr_storage addrs[2];
	struct inpcb *inp;
	struct tcpcb *tp;
	struct sockaddr_in *fin, *lin;
	struct sockaddr_in6 *fin6, *lin6;
	struct in6_addr f6, l6;
	int error;

	inp = NULL;
	fin = lin = NULL;
	fin6 = lin6 = NULL;
	error = 0;

	if (req->oldptr != NULL || req->oldlen != 0)
		return (EINVAL);
	if (req->newptr == NULL)
		return (EPERM);
	if (req->newlen < sizeof(addrs))
		return (ENOMEM);
	error = SYSCTL_IN(req, &addrs, sizeof(addrs));
	if (error)
		return (error);

	switch (addrs[0].ss_family) {
	case AF_INET6:
		fin6 = (struct sockaddr_in6 *)&addrs[0];
		lin6 = (struct sockaddr_in6 *)&addrs[1];
		if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
		    lin6->sin6_len != sizeof(struct sockaddr_in6))
			return (EINVAL);
		if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
			if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
				return (EINVAL);
			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
			fin = (struct sockaddr_in *)&addrs[0];
			lin = (struct sockaddr_in *)&addrs[1];
			break;
		}
		error = in6_embedscope(&f6, fin6, NULL, NULL);
		if (error)
			return (error);
		error = in6_embedscope(&l6, lin6, NULL, NULL);
		if (error)
			return (error);
		break;
	case AF_INET:
		fin = (struct sockaddr_in *)&addrs[0];
		lin = (struct sockaddr_in *)&addrs[1];
		if (fin->sin_len != sizeof(struct sockaddr_in) ||
		    lin->sin_len != sizeof(struct sockaddr_in))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}
	INP_INFO_WLOCK(&tcbinfo);
	switch (addrs[0].ss_family) {
	case AF_INET6:
		inp = in6_pcblookup_hash(&tcbinfo, &f6, fin6->sin6_port,
		    &l6, lin6->sin6_port, 0, NULL);
		break;
	case AF_INET:
		inp = in_pcblookup_hash(&tcbinfo, fin->sin_addr, fin->sin_port,
		    lin->sin_addr, lin->sin_port, 0, NULL);
		break;
	}
	if (inp != NULL) {
		INP_LOCK(inp);
		if ((tp = intotcpcb(inp)) &&
		    ((inp->inp_socket->so_options & SO_ACCEPTCONN) == 0)) {
			tp = tcp_drop(tp, ECONNABORTED);
			if (tp != NULL)
				INP_UNLOCK(inp);
		} else
			INP_UNLOCK(inp);
	} else
		error = ESRCH;
	INP_INFO_WUNLOCK(&tcbinfo);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_DROP, drop,
    CTLTYPE_STRUCT|CTLFLAG_WR|CTLFLAG_SKIP, NULL,
    0, sysctl_drop, "", "Drop TCP connection");
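
/*
 * Usage note: this handler expects a pair of sockaddrs from userland
 * (the foreign endpoint first, then the local one, as noted above).
 * It is the kernel side used by the tcpdrop(8) utility, which looks
 * up the connection by its four-tuple and aborts it via tcp_drop().
 */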