/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 */
#include "opt_compat.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#ifdef INET6
#include <sys/domain.h>
#endif
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/in_pcb.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet6/nd6.h>
#endif
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef INET6
#include <netinet6/ip6protosw.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netkey/key.h>
#endif /* IPSEC */

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/xform.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#include <netipsec/key.h>
#endif /*FAST_IPSEC*/

#include <machine/in_cksum.h>
#include <sys/md5.h>

#include <security/mac/mac_framework.h>
int	tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#ifdef INET6
int	tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
    CTLFLAG_RW, &tcp_v6mssdflt, 0,
    "Default TCP Maximum Segment Size for IPv6");
#endif

/*
 * Minimum MSS we accept and use.  This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one.  The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending.  Set to zero to disable MINMSS
 * checking.  This setting prevents us from sending unreasonably small
 * packets.
 */
int	tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");
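
/*
 * Illustrative sketch (not the verbatim input-path code): when a peer's
 * MSS option is processed, a clamp of the following shape keeps a hostile
 * peer from forcing hundreds of tiny segments per write.  The variable
 * name "mss" here is hypothetical:
 *
 *	if (tcp_minmss != 0 && mss < tcp_minmss)
 *		mss = tcp_minmss;
 */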
int	tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

static int	tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

static int	do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW,
    &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
    &tcbinfo.ipi_count, 0, "Number of active PCBs");

static int	icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW,
    &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int	tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
static uma_zone_t tcptw_zone;
static int	maxtcptw;

static int
tcptw_auto_size(void)
{
	int halfrange;

	/*
	 * Max out at half the ephemeral port range so that TIME_WAIT
	 * sockets don't tie up too many ephemeral ports.
	 */
	if (ipport_lastauto > ipport_firstauto)
		halfrange = (ipport_lastauto - ipport_firstauto) / 2;
	else
		halfrange = (ipport_firstauto - ipport_lastauto) / 2;
	/* Protect against goofy port ranges smaller than 32. */
	return (imin(imax(halfrange, 32), maxsockets / 5));
}
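
/*
 * Worked example with illustrative values: an ephemeral range of
 * 49152..65535 gives halfrange = 8191; with maxsockets = 25600 the
 * function returns imin(imax(8191, 32), 5120) = 5120 TIME_WAIT slots.
 */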
static int
sysctl_maxtcptw(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	if (maxtcptw == 0)
		new = tcptw_auto_size();
	else
		new = maxtcptw;
	error = sysctl_handle_int(oidp, &new, sizeof(int), req);
	if (error == 0 && req->newptr)
		if (new >= 32) {
			maxtcptw = new;
			uma_zone_set_max(tcptw_zone, maxtcptw);
		}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxtcptw, CTLTYPE_INT|CTLFLAG_RW,
    &maxtcptw, 0, sysctl_maxtcptw, "IU",
    "Maximum number of compressed TCP TIME_WAIT entries");
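
/*
 * Administration note: the limit can be set at boot via the
 * net.inet.tcp.maxtcptw loader tunable (fetched in tcp_init() below) or
 * adjusted at runtime with sysctl(8), e.g.
 * "sysctl net.inet.tcp.maxtcptw=16384" (illustrative value).
 */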
static int	nolocaltimewait = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, nolocaltimewait, CTLFLAG_RW,
    &nolocaltimewait, 0,
    "Do not create compressed TCP TIME_WAIT entries for local connections");

/*
 * TCP bandwidth limiting sysctls.  Note that the default lower bound of
 * 1024 exists only for debugging.  A good production default would be
 * something like 6100.
 */
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, inflight, CTLFLAG_RW, 0,
    "TCP inflight data limiting");

static int	tcp_inflight_enable = 1;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int	tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int	tcp_inflight_rttthresh;
SYSCTL_PROC(_net_inet_tcp_inflight, OID_AUTO, rttthresh, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_inflight_rttthresh, 0, sysctl_msec_to_ticks, "I",
    "RTT threshold below which inflight will deactivate itself");

static int	tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower-bound for TCP inflight window");

static int	tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper-bound for TCP inflight window");

static int	tcp_inflight_stab = 20;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Inflight Algorithm Stabilization 20 = 2 packets");
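
/*
 * Note: the stabilization value is applied in units of a tenth of a
 * segment; tcp_xmit_bandwidth_limit() below adds
 * tcp_inflight_stab * t_maxseg / 10 bytes of slop, so the default of 20
 * corresponds to two maximal segments.
 */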
uma_zone_t sack_hole_zone;

static struct inpcb *tcp_notify(struct inpcb *, int);
static void	tcp_isn_tick(void *);

/*
 * Target size of TCP PCB hash tables. Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE	512
#endif

/*
 * Callouts should be moved into struct tcp directly.  They are currently
 * separate because the tcpcb structure is exported to userland for sysctl
 * parsing, which knows nothing about callouts.
 */
struct tcpcb_mem {
	struct	tcpcb tcb;
	struct	tcp_timer tt;
};

static uma_zone_t tcpcb_zone;
struct callout isn_callout;
static struct mtx isn_mtx;

#define	ISN_LOCK_INIT()	mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF)
#define	ISN_LOCK()	mtx_lock(&isn_mtx)
#define	ISN_UNLOCK()	mtx_unlock(&isn_mtx)
/*
 * TCP initialization.
 */
static void
tcp_zone_change(void *tag)
{

	uma_zone_set_max(tcbinfo.ipi_zone, maxsockets);
	uma_zone_set_max(tcpcb_zone, maxsockets);
	if (maxtcptw == 0)
		uma_zone_set_max(tcptw_zone, tcptw_auto_size());
}
static int
tcp_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "tcpinp");
	return (0);
}
void
tcp_init(void)
{
	int hashsize = TCBHASHSIZE;

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;
	tcp_inflight_rttthresh = TCPTV_INFLIGHT_RTTTHRESH;
	tcp_finwait2_timeout = TCPTV_FINWAIT2_TIMEOUT;

	INP_INFO_LOCK_INIT(&tcbinfo, "tcp");
	LIST_INIT(&tcb);
	tcbinfo.ipi_listhead = &tcb;
	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		printf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	tcbinfo.ipi_hashbase = hashinit(hashsize, M_PCB,
	    &tcbinfo.ipi_hashmask);
	tcbinfo.ipi_porthashbase = hashinit(hashsize, M_PCB,
	    &tcbinfo.ipi_porthashmask);
	tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
	    NULL, NULL, tcp_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcbinfo.ipi_zone, maxsockets);
#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR
	/*
	 * These have to be type stable for the benefit of the timers.
	 */
	tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcpcb_zone, maxsockets);
	tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	TUNABLE_INT_FETCH("net.inet.tcp.maxtcptw", &maxtcptw);
	if (maxtcptw == 0)
		uma_zone_set_max(tcptw_zone, tcptw_auto_size());
	else
		uma_zone_set_max(tcptw_zone, maxtcptw);
	syncache_init();
	tcp_hc_init();
	tcp_reass_init();
	ISN_LOCK_INIT();
	callout_init(&isn_callout, CALLOUT_MPSAFE);
	tcp_isn_tick(NULL);
	EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
	    SHUTDOWN_PRI_DEFAULT);
	sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	EVENTHANDLER_REGISTER(maxsockets_change, tcp_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}
void
tcp_fini(void *xtp)
{

	callout_stop(&isn_callout);
}
/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcpip_fillheaders(struct inpcb *inp, void *ip_ptr, void *tcp_ptr)
{
	struct tcphdr *th = (struct tcphdr *)tcp_ptr;

	INP_LOCK_ASSERT(inp);

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
			(inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
			(IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
	} else
#endif
	{
		struct ip *ip;

		ip = (struct ip *)ip_ptr;
		ip->ip_v = IPVERSION;
		ip->ip_hl = 5;
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = inp->inp_ip_ttl;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
	}
	th->th_sport = inp->inp_lport;
	th->th_dport = inp->inp_fport;
	th->th_seq = 0;
	th->th_ack = 0;
	th->th_x2 = 0;
	th->th_off = 5;
	th->th_flags = 0;
	th->th_win = 0;
	th->th_urp = 0;
	th->th_sum = 0;		/* in_pseudo() is called later for ipv4 */
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcpip_maketemplate(struct inpcb *inp)
{
	struct mbuf *m;
	struct tcptemp *n;

	m = m_get(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_len = sizeof(struct tcptemp);
	n = mtod(m, struct tcptemp *);

	tcpip_fillheaders(inp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
	return (n);
}
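
/*
 * Illustrative use (a sketch, not the verbatim keepalive code): the
 * keepalive timer builds a template and hands it to tcp_respond(); the
 * caller is responsible for releasing the mbuf backing the template:
 *
 *	struct tcptemp *t = tcpip_maketemplate(inp);
 *	if (t != NULL)
 *		tcp_respond(tp, t->tt_ipgen, &t->tt_t, NULL,
 *		    tp->rcv_nxt, tp->snd_una - 1, 0);
 */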
/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th,
    struct mbuf *m, tcp_seq ack, tcp_seq seq, int flags)
{
	int tlen;
	int win = 0;
	struct inpcb *inp;
	struct ip *ip;
	struct tcphdr *nth;
#ifdef INET6
	struct ip6_hdr *ip6;
	int isipv6;
#endif /* INET6 */
	int ipflags = 0;

	KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));

#ifdef INET6
	isipv6 = ((struct ip *)ipgen)->ip_v == 6;
	ip6 = ipgen;
#endif /* INET6 */
	ip = ipgen;

	if (tp != NULL) {
		inp = tp->t_inpcb;
		KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
		INP_LOCK_ASSERT(inp);
	} else
		inp = NULL;

	if (tp != NULL) {
		if (!(flags & TH_RST)) {
			win = sbspace(&inp->inp_socket->so_rcv);
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
	}
	if (m == NULL) {
		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
#ifdef INET6
		if (isipv6) {
			bcopy((caddr_t)ip6, mtod(m, caddr_t),
			      sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = NULL;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
#ifdef INET6
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
#ifdef INET6
	if (isipv6) {
		ip6->ip6_flow = 0;
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
						tlen));
		tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	} else
#endif
	{
		tlen += sizeof (struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
		if (path_mtu_discovery)
			ip->ip_off |= IP_DF;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
#ifdef MAC
	if (inp != NULL) {
		/*
		 * Packet is associated with a socket, so allow the
		 * label of the response to reflect the socket label.
		 */
		INP_LOCK_ASSERT(inp);
		mac_create_mbuf_from_inpcb(inp, m);
	} else {
		/*
		 * Packet is not associated with a socket, so possibly
		 * update the label in place.
		 */
		mac_reflect_mbuf_tcp(m);
	}
#endif
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof (struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp != NULL)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
#ifdef INET6
	if (isipv6) {
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
					sizeof(struct ip6_hdr),
					tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb :
		    NULL, NULL);
	} else
#endif /* INET6 */
	{
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
#ifdef INET6
	if (isipv6)
		(void) ip6_output(m, NULL, NULL, ipflags, NULL, NULL, inp);
	else
#endif /* INET6 */
	(void) ip_output(m, NULL, NULL, ipflags, NULL, inp);
}
/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(struct inpcb *inp)
{
	struct tcpcb_mem *tm;
	struct tcpcb *tp;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	tm = uma_zalloc(tcpcb_zone, M_NOWAIT | M_ZERO);
	if (tm == NULL)
		return (NULL);
	tp = &tm->tcb;
	tp->t_timers = &tm->tt;
	/*	LIST_INIT(&tp->t_segq); */	/* XXX covered by M_ZERO */
	tp->t_maxseg = tp->t_maxopd =
#ifdef INET6
		isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
		tcp_mssdflt;

	/* Set up our timeouts. */
	callout_init_mtx(&tp->t_timers->tt_timer, &inp->inp_mtx,
	    CALLOUT_RETURNUNLOCKED);

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
	tp->sack_enable = tcp_do_sack;
	TAILQ_INIT(&tp->snd_holes);
	tp->t_inpcb = inp;	/* XXX */
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = ticks;
	tp->t_bw_rtttime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = tp;
	return (tp);		/* XXX */
}
/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(struct tcpcb *tp, int errno)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	INP_LOCK_ASSERT(tp->t_inpcb);

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}
void
tcp_discardcb(struct tcpcb *tp)
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	INP_LOCK_ASSERT(inp);

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.
	 *
	 * XXX: callout_stop() may race and a callout may already
	 * try to obtain the INP_LOCK.  Only callout_drain() would
	 * stop this but it would cause a LOR thus we can't use it.
	 * The tcp_timer() function contains a lot of checks to
	 * handle this case rather gracefully.
	 */
	tp->t_timers->tt_active = 0;
	callout_stop(&tp->t_timers->tt_timer);

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 4 rtt samples.
	 * 4 samples is enough for the srtt filter to converge
	 * to within a few percent of the correct value; fewer samples
	 * and we could save a bogus rtt.  The danger is not high
	 * as tcp quickly recovers from everything.
	 * XXX: Works very well but needs some more statistics!
	 */
	if (tp->t_rttupdated >= 4) {
		struct hc_metrics_lite metrics;
		u_long ssthresh;

		bzero(&metrics, sizeof(metrics));
		/*
		 * Update the ssthresh always when the conditions below
		 * are satisfied. This gives us better new start value
		 * for the congestion avoidance for new connections.
		 * ssthresh is only set if packet loss occurred on a session.
		 *
		 * XXXRW: 'so' may be NULL here, and/or socket buffer may be
		 * being torn down.  Ideally this code would not use 'so'.
		 */
		ssthresh = tp->snd_ssthresh;
		if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
			if (ssthresh < 2)
				ssthresh = 2;
			ssthresh *= (u_long)(tp->t_maxseg +
#ifdef INET6
				      (isipv6 ? sizeof (struct ip6_hdr) +
					       sizeof (struct tcphdr) :
#endif
				       sizeof (struct tcpiphdr)
#ifdef INET6
				      )
#endif
				      );
		} else
			ssthresh = 0;
		metrics.rmx_ssthresh = ssthresh;

		metrics.rmx_rtt = tp->t_srtt;
		metrics.rmx_rttvar = tp->t_rttvar;
		/* XXX: This wraps if the pipe is more than 4 Gbit per second */
		metrics.rmx_bandwidth = tp->snd_bandwidth;
		metrics.rmx_cwnd = tp->snd_cwnd;
		metrics.rmx_sendpipe = 0;
		metrics.rmx_recvpipe = 0;

		tcp_hc_update(&inp->inp_inc, &metrics);
	}

	/* free the reassembly queue, if any */
	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		uma_zfree(tcp_reass_zone, q);
	}
	tcp_free_sackholes(tp);
	inp->inp_ppcb = NULL;
	tp->t_inpcb = NULL;
	uma_zfree(tcpcb_zone, tp);
}
/*
 * Attempt to close a TCP control block, marking it as dropped, and freeing
 * the socket if we hold the only reference.
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	INP_LOCK_ASSERT(inp);

	in_pcbdrop(inp);
	tcpstat.tcps_closed++;
	KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL"));
	so = inp->inp_socket;
	soisdisconnected(so);
	if (inp->inp_vflag & INP_SOCKREF) {
		KASSERT(so->so_state & SS_PROTOREF,
		    ("tcp_close: !SS_PROTOREF"));
		inp->inp_vflag &= ~INP_SOCKREF;
		INP_UNLOCK(inp);
		ACCEPT_LOCK();
		SOCK_LOCK(so);
		so->so_state &= ~SS_PROTOREF;
		sofree(so);
		return (NULL);
	}
	return (tp);
}
void
tcp_drain(void)
{

	if (do_tcpdrain) {
		struct inpcb *inpb;
		struct tcpcb *tcpb;
		struct tseg_qent *te;

		/*
		 * Walk the tcpcbs, if they exist, and flush the reassembly
		 * queue, if there is one...
		 * XXX: The "Net/3" implementation doesn't imply that the TCP
		 * reassembly queue should be flushed, but in a situation
		 * where we're really low on mbufs, this is potentially
		 * useful.
		 */
		INP_INFO_RLOCK(&tcbinfo);
		LIST_FOREACH(inpb, tcbinfo.ipi_listhead, inp_list) {
			if (inpb->inp_vflag & INP_TIMEWAIT)
				continue;
			INP_LOCK(inpb);
			if ((tcpb = intotcpcb(inpb)) != NULL) {
				while ((te = LIST_FIRST(&tcpb->t_segq))
				    != NULL) {
					LIST_REMOVE(te, tqe_q);
					m_freem(te->tqe_m);
					uma_zfree(tcp_reass_zone, te);
				}
				tcp_clean_sackreport(tcpb);
			}
			INP_UNLOCK(inpb);
		}
		INP_INFO_RUNLOCK(&tcbinfo);
	}
}
/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static struct inpcb *
tcp_notify(struct inpcb *inp, int error)
{
	struct tcpcb *tp;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	INP_LOCK_ASSERT(inp);

	if ((inp->inp_vflag & INP_TIMEWAIT) ||
	    (inp->inp_vflag & INP_DROPPED))
		return (inp);

	tp = intotcpcb(inp);
	KASSERT(tp != NULL, ("tcp_notify: tp == NULL"));

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return (inp);
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror) {
		tp = tcp_drop(tp, error);
		if (tp != NULL)
			return (inp);
		else
			return (NULL);
	} else {
		tp->t_softerror = error;
		return (inp);
	}
#if 0
	wakeup( &so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}
static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		n = tcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
			+ (n + n/8) * sizeof(struct xtcpcb);
		return (0);
	}

	if (req->newptr != NULL)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_RLOCK(&tcbinfo);
	gencnt = tcbinfo.ipi_gencnt;
	n = tcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&tcbinfo);

	error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
		+ n * sizeof(struct xtcpcb));
	if (error != 0)
		return (error);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == NULL)
		return (ENOMEM);

	INP_INFO_RLOCK(&tcbinfo);
	for (inp = LIST_FIRST(tcbinfo.ipi_listhead), i = 0; inp != NULL && i
	    < n; inp = LIST_NEXT(inp, inp_list)) {
		INP_LOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			/*
			 * XXX: This use of cr_cansee(), introduced with
			 * TCP state changes, is not quite right, but for
			 * now, better than nothing.
			 */
			if (inp->inp_vflag & INP_TIMEWAIT) {
				if (intotw(inp) != NULL)
					error = cr_cansee(req->td->td_ucred,
					    intotw(inp)->tw_cred);
				else
					error = EINVAL;	/* Skip this inp. */
			} else
				error = cr_canseesocket(req->td->td_ucred,
				    inp->inp_socket);
			if (error == 0)
				inp_list[i++] = inp;
		}
		INP_UNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&tcbinfo);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_LOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xtcpcb xt;
			void *inp_ppcb;

			bzero(&xt, sizeof(xt));
			xt.xt_len = sizeof xt;
			/* XXX should avoid extra copy */
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb == NULL)
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
			else if (inp->inp_vflag & INP_TIMEWAIT) {
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
				xt.xt_tp.t_state = TCPS_TIME_WAIT;
			} else
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket != NULL)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			else {
				bzero(&xt.xt_socket, sizeof xt.xt_socket);
				xt.xt_socket.xso_protocol = IPPROTO_TCP;
			}
			xt.xt_inp.inp_gencnt = inp->inp_gencnt;
			INP_UNLOCK(inp);
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		} else
			INP_UNLOCK(inp);
	}
	if (error == 0) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		INP_INFO_RLOCK(&tcbinfo);
		xig.xig_gen = tcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = tcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&tcbinfo);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int error;

	error = priv_check_cred(req->td->td_ucred, PRIV_NETINET_GETCRED,
	    SUSER_ALLOWJAIL);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	INP_INFO_RLOCK(&tcbinfo);
	inp = in_pcblookup_hash(&tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
	    addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL) {
		error = ENOENT;
		goto outunlocked;
	}
	INP_LOCK(inp);
	if (inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
	if (error)
		goto out;
	cru2x(inp->inp_socket->so_cred, &xuc);
out:
	INP_UNLOCK(inp);
outunlocked:
	INP_INFO_RUNLOCK(&tcbinfo);
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp_getcred, "S,xucred", "Get the xucred of a TCP connection");
#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error, mapped = 0;

	error = priv_check_cred(req->td->td_ucred, PRIV_NETINET_GETCRED,
	    SUSER_ALLOWJAIL);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	if ((error = sa6_embedscope(&addrs[0], ip6_use_defzone)) != 0 ||
	    (error = sa6_embedscope(&addrs[1], ip6_use_defzone)) != 0) {
		return (error);
	}
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = 1;
		else
			return (EINVAL);
	}

	INP_INFO_RLOCK(&tcbinfo);
	if (mapped == 1)
		inp = in_pcblookup_hash(&tcbinfo,
			*(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
			addrs[1].sin6_port,
			*(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
			addrs[0].sin6_port, 0, NULL);
	else
		inp = in6_pcblookup_hash(&tcbinfo,
			&addrs[1].sin6_addr, addrs[1].sin6_port,
			&addrs[0].sin6_addr, addrs[0].sin6_port, 0, NULL);
	if (inp == NULL) {
		error = ENOENT;
		goto outunlocked;
	}
	INP_LOCK(inp);
	if (inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
	if (error)
		goto out;
	cru2x(inp->inp_socket->so_cred, &xuc);
out:
	INP_UNLOCK(inp);
outunlocked:
	INP_INFO_RUNLOCK(&tcbinfo);
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection");
#endif
void
tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct ip *ip = vip;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	struct icmp *icp;
	struct in_conninfo inc;
	tcp_seq icmp_tcp_seq;
	int mtu;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
		cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
		notify = tcp_drop_syn_sent;
	/*
	 * Redirects don't need to be handled up here.
	 */
	else if (PRC_IS_REDIRECT(cmd))
		return;
	/*
	 * Source quench is deprecated.
	 */
	else if (cmd == PRC_QUENCH)
		return;
	/*
	 * Hostdead is ugly because it goes linearly through all PCBs.
	 * XXX: We never get this from ICMP, otherwise it makes an
	 * excellent DoS attack on machines with many connections.
	 */
	else if (cmd == PRC_HOSTDEAD)
		ip = NULL;
	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
		return;
	if (ip != NULL) {
		icp = (struct icmp *)((caddr_t)ip
				      - offsetof(struct icmp, icmp_ip));
		th = (struct tcphdr *)((caddr_t)ip
				       + (ip->ip_hl << 2));
		INP_INFO_WLOCK(&tcbinfo);
		inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
		    ip->ip_src, th->th_sport, 0, NULL);
		if (inp != NULL) {
			INP_LOCK(inp);
			if (!(inp->inp_vflag & INP_TIMEWAIT) &&
			    !(inp->inp_vflag & INP_DROPPED) &&
			    !(inp->inp_socket == NULL)) {
				icmp_tcp_seq = htonl(th->th_seq);
				tp = intotcpcb(inp);
				if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
				    SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
					if (cmd == PRC_MSGSIZE) {
					    /*
					     * MTU discovery:
					     * If we got a needfrag set the MTU
					     * in the route to the suggested new
					     * value (if given) and then notify.
					     */
					    bzero(&inc, sizeof(inc));
					    inc.inc_flags = 0;	/* IPv4 */
					    inc.inc_faddr = faddr;

					    mtu = ntohs(icp->icmp_nextmtu);
					    /*
					     * If no alternative MTU was
					     * proposed, try the next smaller
					     * one.  ip->ip_len has already
					     * been swapped in icmp_input().
					     */
					    if (!mtu)
						mtu = ip_next_mtu(ip->ip_len,
						    1);
					    if (mtu < max(296, (tcp_minmss)
						+ sizeof(struct tcpiphdr)))
						mtu = 0;
					    if (!mtu)
						mtu = tcp_mssdflt
						    + sizeof(struct tcpiphdr);
					    /*
					     * Only cache the MTU if it
					     * is smaller than the interface
					     * or route MTU.  tcp_mtudisc()
					     * will do the right thing by
					     * itself.
					     */
					    if (mtu <= tcp_maxmtu(&inc, NULL))
						tcp_hc_updatemtu(&inc, mtu);
					}

					inp = (*notify)(inp, inetctlerrmap[cmd]);
				}
			}
			if (inp != NULL)
				INP_UNLOCK(inp);
		} else {
			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
#ifdef INET6
			inc.inc_isipv6 = 0;
#endif
			syncache_unreach(&inc, th);
		}
		INP_INFO_WUNLOCK(&tcbinfo);
	} else
		in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
}
#ifdef INET6
void
tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
{
	struct tcphdr th;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;

	if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (!PRC_IS_REDIRECT(cmd) &&
		 ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
		return;
	/* Source quench is deprecated. */
	else if (cmd == PRC_QUENCH)
		return;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6 != NULL) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when ip6 is non-NULL,
		 * m and off are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(*thp))
			return;

		bzero(&th, sizeof(th));
		m_copydata(m, off, sizeof(*thp), (caddr_t)&th);

		in6_pcbnotify(&tcbinfo, sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, NULL, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		INP_INFO_WLOCK(&tcbinfo);
		syncache_unreach(&inc, &th);
		INP_INFO_WUNLOCK(&tcbinfo);
	} else
		in6_pcbnotify(&tcbinfo, sa, 0, (const struct sockaddr *)sa6_src,
			      0, cmd, NULL, notify);
}
#endif /* INET6 */
/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used, with only small modifications.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover occurs.
 *
 * As reading the *exact* system time is too expensive to be done
 * whenever setting up a TCP connection, we increment the time
 * offset in two ways.  First, a small random positive increment
 * is added to isn_offset for each connection that is set up.
 * Second, the function tcp_isn_tick fires once per clock tick
 * and increments isn_offset as necessary so that sequence numbers
 * are incremented at approximately ISN_BYTES_PER_SECOND.  The
 * random positive increments serve only to ensure that the same
 * exact sequence number is never sent out twice (as could otherwise
 * happen when a port is recycled in less than the system tick
 * interval.)
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 *
 * Locking of the global variables isn_secret, isn_last_reseed, isn_offset,
 * isn_offset_old, and isn_ctx is performed using the TCP pcbinfo lock.  In
 * general, this means holding an exclusive (write) lock.
 */
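
/*
 * In RFC 1948 terms, the ISN computed below is roughly
 *
 *	ISN = M + F(localhost, localport, remotehost, remoteport, secret)
 *
 * where M is the monotonic time-based offset (isn_offset) and F is the
 * MD5 digest truncated to 32 bits.
 */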
#define ISN_BYTES_PER_SECOND 1048576
#define ISN_STATIC_INCREMENT 4096
#define ISN_RANDOM_INCREMENT (4096 - 1)

static u_char isn_secret[32];
static int isn_last_reseed;
static u_int32_t isn_offset, isn_offset_old;
static MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(struct tcpcb *tp)
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	INP_LOCK_ASSERT(tp->t_inpcb);

	ISN_LOCK();
	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	     (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
		< (u_int)ticks))) {
		read_random(&isn_secret, sizeof(isn_secret));
		isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
			  sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
			  sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
			  sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
			  sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	isn_offset += ISN_STATIC_INCREMENT +
		(arc4random() & ISN_RANDOM_INCREMENT);
	new_isn += isn_offset;
	ISN_UNLOCK();
	return (new_isn);
}
/*
 * Increment the offset to the next ISN_BYTES_PER_SECOND / 100 boundary
 * to keep time flowing at a relatively constant rate.  If the random
 * increments have already pushed us past the projected offset, do nothing.
 */
static void
tcp_isn_tick(void *xtp)
{
	u_int32_t projected_offset;

	ISN_LOCK();
	projected_offset = isn_offset_old + ISN_BYTES_PER_SECOND / 100;

	if (projected_offset > isn_offset)
		isn_offset = projected_offset;

	isn_offset_old = isn_offset;
	callout_reset(&isn_callout, hz/100, tcp_isn_tick, NULL);
	ISN_UNLOCK();
}
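
/*
 * Rate check (illustrative): with hz = 1000 the callout fires every
 * hz/100 = 10 ticks, i.e. 100 times per second, and each firing advances
 * the offset by up to ISN_BYTES_PER_SECOND / 100 = 10485, for roughly
 * one megabyte of sequence space per second, as intended.
 */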
/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
struct inpcb *
tcp_drop_syn_sent(struct inpcb *inp, int errno)
{
	struct tcpcb *tp;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	INP_LOCK_ASSERT(inp);

	if ((inp->inp_vflag & INP_TIMEWAIT) ||
	    (inp->inp_vflag & INP_DROPPED))
		return (inp);

	tp = intotcpcb(inp);
	if (tp->t_state != TCPS_SYN_SENT)
		return (inp);

	tp = tcp_drop(tp, errno);
	if (tp != NULL)
		return (inp);
	else
		return (NULL);
}
/*
 * When `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
struct inpcb *
tcp_mtudisc(struct inpcb *inp, int errno)
{
	struct tcpcb *tp;
	struct socket *so = inp->inp_socket;
	u_int maxmtu;
	u_int romtu;
	int mss;
#ifdef INET6
	int isipv6;
#endif /* INET6 */

	INP_LOCK_ASSERT(inp);
	if ((inp->inp_vflag & INP_TIMEWAIT) ||
	    (inp->inp_vflag & INP_DROPPED))
		return (inp);

	tp = intotcpcb(inp);
	KASSERT(tp != NULL, ("tcp_mtudisc: tp == NULL"));

#ifdef INET6
	isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
#endif
	maxmtu = tcp_hc_getmtu(&inp->inp_inc); /* IPv4 and IPv6 */
	romtu =
#ifdef INET6
	    isipv6 ? tcp_maxmtu6(&inp->inp_inc, NULL) :
#endif /* INET6 */
	    tcp_maxmtu(&inp->inp_inc, NULL);
	if (!maxmtu)
		maxmtu = romtu;
	else
		maxmtu = min(maxmtu, romtu);
	if (!maxmtu) {
		tp->t_maxopd = tp->t_maxseg =
#ifdef INET6
			isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
			tcp_mssdflt;
		return (inp);
	}
	mss = maxmtu -
#ifdef INET6
		(isipv6 ? sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
#endif /* INET6 */
		 sizeof(struct tcpiphdr)
#ifdef INET6
		 )
#endif /* INET6 */
		;

	/*
	 * XXX - The above conditional probably violates the TCP
	 * spec.  The problem is that, since we don't know the
	 * other end's MSS, we are supposed to use a conservative
	 * default.  But, if we do that, then MTU discovery will
	 * never actually take place, because the conservative
	 * default is much less than the MTUs typically seen
	 * on the Internet today.  For the moment, we'll sweep
	 * this under the carpet.
	 *
	 * The conservative default might not actually be a problem
	 * if the only case this occurs is when sending an initial
	 * SYN with options and data to a host we've never talked
	 * to before.  Then, they will reply with an MSS value which
	 * will get recorded and the new parameters should get
	 * recomputed.  For Further Study.
	 */
	if (tp->t_maxopd <= mss)
		return (inp);
	tp->t_maxopd = mss;

	if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
	    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
		mss -= TCPOLEN_TSTAMP_APPA;
#if	(MCLBYTES & (MCLBYTES - 1)) == 0
	if (mss > MCLBYTES)
		mss &= ~(MCLBYTES-1);
#else
	if (mss > MCLBYTES)
		mss = mss / MCLBYTES * MCLBYTES;
#endif
	if (so->so_snd.sb_hiwat < mss)
		mss = so->so_snd.sb_hiwat;

	tp->t_maxseg = mss;

	tcpstat.tcps_mturesent++;
	tp->t_rtttime = 0;
	tp->snd_nxt = tp->snd_una;
	tcp_free_sackholes(tp);
	tp->snd_recover = tp->snd_max;
	if (tp->sack_enable)
		EXIT_FASTRECOVERY(tp);
	tcp_output(tp);
	return (inp);
}
/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
u_long
tcp_maxmtu(struct in_conninfo *inc, int *flags)
{
	struct route sro;
	struct sockaddr_in *dst;
	struct ifnet *ifp;
	u_long maxmtu = 0;

	KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));

	bzero(&sro, sizeof(sro));
	if (inc->inc_faddr.s_addr != INADDR_ANY) {
		dst = (struct sockaddr_in *)&sro.ro_dst;
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = inc->inc_faddr;
		rtalloc_ign(&sro, RTF_CLONING);
	}
	if (sro.ro_rt != NULL) {
		ifp = sro.ro_rt->rt_ifp;
		if (sro.ro_rt->rt_rmx.rmx_mtu == 0)
			maxmtu = ifp->if_mtu;
		else
			maxmtu = min(sro.ro_rt->rt_rmx.rmx_mtu, ifp->if_mtu);

		/* Report additional interface capabilities. */
		if (flags != NULL) {
			if (ifp->if_capenable & IFCAP_TSO4 &&
			    ifp->if_hwassist & CSUM_TSO)
				*flags |= CSUM_TSO;
		}
		RTFREE(sro.ro_rt);
	}
	return (maxmtu);
}
#ifdef INET6
u_long
tcp_maxmtu6(struct in_conninfo *inc, int *flags)
{
	struct route_in6 sro6;
	struct ifnet *ifp;
	u_long maxmtu = 0;

	KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));

	bzero(&sro6, sizeof(sro6));
	if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
		sro6.ro_dst.sin6_family = AF_INET6;
		sro6.ro_dst.sin6_len = sizeof(struct sockaddr_in6);
		sro6.ro_dst.sin6_addr = inc->inc6_faddr;
		rtalloc_ign((struct route *)&sro6, RTF_CLONING);
	}
	if (sro6.ro_rt != NULL) {
		ifp = sro6.ro_rt->rt_ifp;
		if (sro6.ro_rt->rt_rmx.rmx_mtu == 0)
			maxmtu = IN6_LINKMTU(sro6.ro_rt->rt_ifp);
		else
			maxmtu = min(sro6.ro_rt->rt_rmx.rmx_mtu,
				     IN6_LINKMTU(sro6.ro_rt->rt_ifp));

		/* Report additional interface capabilities. */
		if (flags != NULL) {
			if (ifp->if_capenable & IFCAP_TSO6 &&
			    ifp->if_hwassist & CSUM_TSO)
				*flags |= CSUM_TSO;
		}
		RTFREE(sro6.ro_rt);
	}

	return (maxmtu);
}
#endif /* INET6 */
#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(struct tcpcb *tp)
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return (0);
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (!m)
		return (0);

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
			sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcpip_fillheaders(inp, ip6, th);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif /* INET6 */
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcpip_fillheaders(inp, ip, th);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}

	m_free(m);
	return (hdrsiz);
}
#endif /* IPSEC */
/*
 * Move a TCP connection into TIME_WAIT state.
 *    tcbinfo is locked.
 *    inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(struct tcpcb *tp)
{
	struct tcptw *tw;
	struct inpcb *inp = tp->t_inpcb;
	int acknow;
	struct socket *so;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);	/* tcp_timer_2msl_reset(). */
	INP_LOCK_ASSERT(inp);

	if (nolocaltimewait && in_localip(inp->inp_faddr)) {
		tp = tcp_close(tp);
		if (tp != NULL)
			INP_UNLOCK(inp);
		return;
	}

	tw = uma_zalloc(tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		tw = tcp_timer_2msl_tw(1);
		if (tw == NULL) {
			tp = tcp_close(tp);
			if (tp != NULL)
				INP_UNLOCK(inp);
			return;
		}
	}
	tw->tw_inpcb = inp;

	/*
	 * Recover last window size sent.
	 */
	tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;

	/*
	 * Set t_recent if timestamps are used on the connection.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP))
		tw->t_recent = tp->ts_recent;
	else
		tw->t_recent = 0;

	tw->snd_nxt = tp->snd_nxt;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->iss     = tp->iss;
	tw->irs     = tp->irs;
	tw->t_starttime = tp->t_starttime;
	tw->tw_time = 0;

/* XXX
 * If this code will
 * be used for fin-wait-2 state also, then we may need
 * a ts_recent from the last segment.
 */
	acknow = tp->t_flags & TF_ACKNOW;

	/*
	 * First, discard tcpcb state, which includes stopping its timers and
	 * freeing it.  tcp_discardcb() used to also release the inpcb, but
	 * that work is now done in the caller.
	 *
	 * Note: soisdisconnected() call used to be made in tcp_discardcb(),
	 * and might not be needed here any longer.
	 */
	tcp_discardcb(tp);
	so = inp->inp_socket;
	soisdisconnected(so);
	tw->tw_cred = crhold(so->so_cred);
	tw->tw_so_options = so->so_options;
	if (acknow)
		tcp_twrespond(tw, TH_ACK);
	inp->inp_ppcb = tw;
	inp->inp_vflag |= INP_TIMEWAIT;
	tcp_timer_2msl_reset(tw, 0);

	/*
	 * If the inpcb owns the sole reference to the socket, then we can
	 * detach and free the socket as it is not needed in time wait.
	 */
	if (inp->inp_vflag & INP_SOCKREF) {
		KASSERT(so->so_state & SS_PROTOREF,
		    ("tcp_twstart: !SS_PROTOREF"));
		inp->inp_vflag &= ~INP_SOCKREF;
		INP_UNLOCK(inp);
		ACCEPT_LOCK();
		SOCK_LOCK(so);
		so->so_state &= ~SS_PROTOREF;
		sofree(so);
	} else
		INP_UNLOCK(inp);
}
/*
 * The approximate rate of ISN increase of Microsoft TCP stacks;
 * the actual rate is slightly higher due to the addition of
 * random positive increments.
 *
 * Most other new OSes use semi-randomized ISN values, so we
 * do not need to worry about them.
 */
#define MS_ISN_BYTES_PER_SECOND		250000

/*
 * Determine if the ISN we will generate has advanced beyond the last
 * sequence number used by the previous connection.  If so, indicate
 * that it is safe to recycle this tw socket by returning 1.
 */
int
tcp_twrecycleable(struct tcptw *tw)
{
	tcp_seq new_iss = tw->iss;
	tcp_seq new_irs = tw->irs;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
	new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);

	if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
		return (1);
	else
		return (0);
}
void
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct socket *so;
	struct inpcb *inp;

	/*
	 * At this point, we are in one of two situations:
	 *
	 * (1) We have no socket, just an inpcb<->twtcp pair.  We can free
	 *     all state.
	 *
	 * (2) We have a socket -- if we own a reference, release it and
	 *     notify the socket layer.
	 */
	inp = tw->tw_inpcb;
	KASSERT((inp->inp_vflag & INP_TIMEWAIT), ("tcp_twclose: !timewait"));
	KASSERT(intotw(inp) == tw, ("tcp_twclose: inp_ppcb != tw"));
	INP_INFO_WLOCK_ASSERT(&tcbinfo);	/* tcp_timer_2msl_stop(). */
	INP_LOCK_ASSERT(inp);

	tw->tw_inpcb = NULL;
	tcp_timer_2msl_stop(tw);
	inp->inp_ppcb = NULL;
	in_pcbdrop(inp);

	so = inp->inp_socket;
	if (so != NULL) {
		/*
		 * If there's a socket, handle two cases: first, we own a
		 * strong reference, which we will now release, or we don't
		 * in which case another reference exists (XXXRW: think
		 * about this more), and we don't need to take action.
		 */
		if (inp->inp_vflag & INP_SOCKREF) {
			inp->inp_vflag &= ~INP_SOCKREF;
			INP_UNLOCK(inp);
			ACCEPT_LOCK();
			SOCK_LOCK(so);
			KASSERT(so->so_state & SS_PROTOREF,
			    ("tcp_twclose: INP_SOCKREF && !SS_PROTOREF"));
			so->so_state &= ~SS_PROTOREF;
			sofree(so);
		} else {
			/*
			 * If we don't own the only reference, the socket and
			 * inpcb need to be left around to be handled by
			 * tcp_usr_detach() later.
			 */
			INP_UNLOCK(inp);
		}
	} else {
#ifdef INET6
		if (inp->inp_vflag & INP_IPV6PROTO)
			in6_pcbfree(inp);
		else
#endif
			in_pcbfree(inp);
	}
	tcpstat.tcps_closed++;
	crfree(tw->tw_cred);
	tw->tw_cred = NULL;
	if (reuse == 0)
		uma_zfree(tcptw_zone, tw);
}
int
tcp_twrespond(struct tcptw *tw, int flags)
{
	struct inpcb *inp = tw->tw_inpcb;
	struct tcphdr *th;
	struct mbuf *m;
	struct ip *ip = NULL;
	u_int hdrlen, optlen;
	int error;
	struct tcpopt to;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_isipv6;
#endif

	INP_LOCK_ASSERT(inp);

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef MAC
	mac_create_mbuf_from_inpcb(inp, m);
#endif

#ifdef INET6
	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, ip6, th);
	} else
#endif
	{
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, ip, th);
	}
	to.to_flags = 0;

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYN's and this is not a RST.
	 */
	if (tw->t_recent && flags == TH_ACK) {
		to.to_flags |= TOF_TS;
		to.to_tsval = ticks;
		to.to_tsecr = tw->t_recent;
	}
	optlen = tcp_addoptions(&to, (u_char *)(th + 1));

	m->m_len = hdrlen + optlen;
	m->m_pkthdr.len = m->m_len;

	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

	th->th_seq = htonl(tw->snd_nxt);
	th->th_ack = htonl(tw->rcv_nxt);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons(tw->last_win);

#ifdef INET6
	if (isipv6) {
		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
		    sizeof(struct tcphdr) + optlen);
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
		error = ip6_output(m, inp->in6p_outputopts, NULL,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
	} else
#endif
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		ip->ip_len = m->m_pkthdr.len;
		if (path_mtu_discovery)
			ip->ip_off |= IP_DF;
		error = ip_output(m, inp->inp_options, NULL,
		    ((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, inp);
	}
	if (flags & TH_ACK)
		tcpstat.tcps_sndacks++;
	else
		tcpstat.tcps_sndctrl++;
	tcpstat.tcps_sndtotal++;
	return (error);
}
/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be) TCP/Vegas.  The code operates on the
 * transmitter side of a TCP connection and so only affects the transmit
 * side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad-infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this:  First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad-infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
void
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
{
	u_long bw;
	u_long bwnd;
	int save_ticks;

	INP_LOCK_ASSERT(tp->t_inpcb);

	/*
	 * If inflight_enable is disabled in the middle of a tcp connection,
	 * make sure snd_bwnd is effectively disabled.
	 */
	if (tcp_inflight_enable == 0 || tp->t_rttlow < tcp_inflight_rttthresh) {
		tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
		tp->snd_bandwidth = 0;
		return;
	}

	/*
	 * Figure out the bandwidth.  Due to the tick granularity this
	 * is a very rough number and it MUST be averaged over a fairly
	 * long period of time.  XXX we need to take into account a link
	 * that is not using all available bandwidth, but for now our
	 * slop will ramp us up if this case occurs and the bandwidth later
	 * increases.
	 *
	 * Note: if ticks rollover 'bw' may wind up negative.  We must
	 * effectively reset t_bw_rtttime for this case.
	 */
	save_ticks = ticks;
	if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)
		return;

	bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
	    (save_ticks - tp->t_bw_rtttime);
	tp->t_bw_rtttime = save_ticks;
	tp->t_bw_rtseq = ack_seq;
	if (tp->t_bw_rtttime == 0 || (int)bw < 0)
		return;
	bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;

	tp->snd_bandwidth = bw;

	/*
	 * Calculate the semi-static bandwidth delay product, plus two maximal
	 * segments.  The additional slop puts us squarely in the sweet
	 * spot and also handles the bandwidth run-up case and stabilization.
	 * Without the slop we could be locking ourselves into a lower
	 * bandwidth.
	 *
	 * Situations Handled:
	 *	(1) Prevents over-queueing of packets on LANs, especially on
	 *	    high speed LANs, allowing larger TCP buffers to be
	 *	    specified, and also does a good job preventing
	 *	    over-queueing of packets over choke points like modems
	 *	    (at least for the transmit side).
	 *
	 *	(2) Is able to handle changing network loads (bandwidth
	 *	    drops so bwnd drops, bandwidth increases so bwnd
	 *	    increases).
	 *
	 *	(3) Theoretically should stabilize in the face of multiple
	 *	    connections implementing the same algorithm (this may need
	 *	    a little work).
	 *
	 *	(4) Stability value (defaults to 20 = 2 maximal packets) can
	 *	    be adjusted with a sysctl but typically only needs to be
	 *	    changed on very slow connections.  A value no smaller
	 *	    than 5 should be used, but only reduce this default if
	 *	    you have no other choice.
	 */
#define USERTT	((tp->t_srtt + tp->t_rttbest) / 2)
	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) + tcp_inflight_stab * tp->t_maxseg / 10;
#undef USERTT

	if (tcp_inflight_debug > 0) {
		static int ltime;
		if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
			ltime = ticks;
			printf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
			    tp, bw, tp->t_rttbest, tp->t_srtt, bwnd);
		}
	}
	if ((long)bwnd < tcp_inflight_min)
		bwnd = tcp_inflight_min;
	if (bwnd > tcp_inflight_max)
		bwnd = tcp_inflight_max;
	if ((long)bwnd < tp->t_maxseg * 2)
		bwnd = tp->t_maxseg * 2;
	tp->snd_bwnd = bwnd;
}
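
/*
 * Worked example for the bwnd formula above (illustrative numbers): on a
 * 10 Mbit/s path, bw ~= 1250000 bytes/sec.  With hz = 1000 and a smoothed
 * RTT of 100 ms, t_srtt ~= 100 << TCP_RTT_SHIFT, so USERTT ~= 3200 and
 * bw * USERTT / (hz << TCP_RTT_SHIFT) = 1250000 * 3200 / 32000 = 125000
 * bytes, i.e. the classic bandwidth * delay product, plus the two-segment
 * slop contributed by tcp_inflight_stab.
 */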
#ifdef TCP_SIGNATURE
/*
 * Callback function invoked by m_apply() to digest TCP segment data
 * contained within an mbuf chain.
 */
static int
tcp_signature_apply(void *fstate, void *data, u_int len)
{

	MD5Update(fstate, (u_char *)data, len);
	return (0);
}

/*
 * Compute TCP-MD5 hash of a TCPv4 segment. (RFC2385)
 *
 * Parameters:
 * m		pointer to head of mbuf chain
 * off0		offset to TCP header within the mbuf chain
 * len		length of TCP segment data, excluding options
 * optlen	length of TCP segment options
 * buf		pointer to storage for computed MD5 digest
 * direction	direction of flow (IPSEC_DIR_INBOUND or OUTBOUND)
 *
 * We do this over ip, tcphdr, segment data, and the key in the SADB.
 * When called from tcp_input(), we can be sure that th_sum has been
 * zeroed out and verified already.
 *
 * This function is for IPv4 use only. Calling this function with an
 * IPv6 packet in the mbuf chain will yield undefined results.
 *
 * Return 0 if successful, otherwise return -1.
 *
 * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
 * search with the destination IP address, and a 'magic SPI' to be
 * determined by the application. This is hardcoded elsewhere to 1179
 * right now. Another branch of this code exists which uses the SPD to
 * specify per-application flows but it is unstable.
 */
int
tcp_signature_compute(struct mbuf *m, int off0, int len, int optlen,
    u_char *buf, u_int direction)
{
	union sockaddr_union dst;
	struct ippseudo ippseudo;
	MD5_CTX ctx;
	int doff;
	struct ip *ip;
	struct ipovly *ipovly;
	struct secasvar *sav;
	struct tcphdr *th;
	u_short savecsum;

	KASSERT(m != NULL, ("NULL mbuf chain"));
	KASSERT(buf != NULL, ("NULL signature pointer"));

	/* Extract the destination from the IP header in the mbuf. */
	ip = mtod(m, struct ip *);
	bzero(&dst, sizeof(union sockaddr_union));
	dst.sa.sa_len = sizeof(struct sockaddr_in);
	dst.sa.sa_family = AF_INET;
	dst.sin.sin_addr = (direction == IPSEC_DIR_INBOUND) ?
	    ip->ip_src : ip->ip_dst;

	/* Look up an SADB entry which matches the address of the peer. */
	sav = KEY_ALLOCSA(&dst, IPPROTO_TCP, htonl(TCP_SIG_SPI));
	if (sav == NULL) {
		printf("%s: SADB lookup failed for %s\n", __func__,
		    inet_ntoa(dst.sin.sin_addr));
		return (EINVAL);
	}

	MD5Init(&ctx);
	ipovly = (struct ipovly *)ip;
	th = (struct tcphdr *)((u_char *)ip + off0);
	doff = off0 + sizeof(struct tcphdr) + optlen;

	/*
	 * Step 1: Update MD5 hash with IP pseudo-header.
	 *
	 * XXX The ippseudo header MUST be digested in network byte order,
	 * or else we'll fail the regression test. Assume all fields we've
	 * been doing arithmetic on have been in host byte order.
	 * XXX One cannot depend on ipovly->ih_len here. When called from
	 * tcp_output(), the underlying ip_len member has not yet been set.
	 */
	ippseudo.ippseudo_src = ipovly->ih_src;
	ippseudo.ippseudo_dst = ipovly->ih_dst;
	ippseudo.ippseudo_pad = 0;
	ippseudo.ippseudo_p = IPPROTO_TCP;
	ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) + optlen);
	MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));

	/*
	 * Step 2: Update MD5 hash with TCP header, excluding options.
	 * The TCP checksum must be set to zero.
	 */
	savecsum = th->th_sum;
	th->th_sum = 0;
	MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
	th->th_sum = savecsum;

	/*
	 * Step 3: Update MD5 hash with TCP segment data.
	 *         Use m_apply() to avoid an early m_pullup().
	 */
	m_apply(m, doff, len, tcp_signature_apply, &ctx);

	/*
	 * Step 4: Update MD5 hash with shared secret.
	 */
	MD5Update(&ctx, _KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth));
	MD5Final(buf, &ctx);

	key_sa_recordxfer(sav, m);
	KEY_FREESAV(&sav);
	return (0);
}
#endif /* TCP_SIGNATURE */
static int
sysctl_drop(SYSCTL_HANDLER_ARGS)
{
	/* addrs[0] is a foreign socket, addrs[1] is a local one. */
	struct sockaddr_storage addrs[2];
	struct inpcb *inp;
	struct tcpcb *tp;
	struct tcptw *tw;
	struct sockaddr_in *fin, *lin;
#ifdef INET6
	struct sockaddr_in6 *fin6, *lin6;
	struct in6_addr f6, l6;
#endif
	int error;

	inp = NULL;
	fin = lin = NULL;
#ifdef INET6
	fin6 = lin6 = NULL;
#endif
	error = 0;

	if (req->oldptr != NULL || req->oldlen != 0)
		return (EINVAL);
	if (req->newptr == NULL)
		return (EPERM);
	if (req->newlen < sizeof(addrs))
		return (ENOMEM);
	error = SYSCTL_IN(req, &addrs, sizeof(addrs));
	if (error != 0)
		return (error);

	switch (addrs[0].ss_family) {
#ifdef INET6
	case AF_INET6:
		fin6 = (struct sockaddr_in6 *)&addrs[0];
		lin6 = (struct sockaddr_in6 *)&addrs[1];
		if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
		    lin6->sin6_len != sizeof(struct sockaddr_in6))
			return (EINVAL);
		if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
			if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
				return (EINVAL);
			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
			fin = (struct sockaddr_in *)&addrs[0];
			lin = (struct sockaddr_in *)&addrs[1];
			break;
		}
		error = sa6_embedscope(fin6, ip6_use_defzone);
		if (error)
			return (error);
		error = sa6_embedscope(lin6, ip6_use_defzone);
		if (error)
			return (error);
		f6 = fin6->sin6_addr;
		l6 = lin6->sin6_addr;
		break;
#endif
	case AF_INET:
		fin = (struct sockaddr_in *)&addrs[0];
		lin = (struct sockaddr_in *)&addrs[1];
		if (fin->sin_len != sizeof(struct sockaddr_in) ||
		    lin->sin_len != sizeof(struct sockaddr_in))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}
	INP_INFO_WLOCK(&tcbinfo);
	switch (addrs[0].ss_family) {
#ifdef INET6
	case AF_INET6:
		inp = in6_pcblookup_hash(&tcbinfo, &f6, fin6->sin6_port,
		    &l6, lin6->sin6_port, 0, NULL);
		break;
#endif
	case AF_INET:
		inp = in_pcblookup_hash(&tcbinfo, fin->sin_addr, fin->sin_port,
		    lin->sin_addr, lin->sin_port, 0, NULL);
		break;
	}
	if (inp != NULL) {
		INP_LOCK(inp);
		if (inp->inp_vflag & INP_TIMEWAIT) {
			/*
			 * XXXRW: There currently exists a state where an
			 * inpcb is present, but its timewait state has been
			 * discarded.  For now, don't allow dropping of this
			 * type of inpcb.
			 */
			tw = intotw(inp);
			if (tw != NULL)
				tcp_twclose(tw, 0);
			else
				INP_UNLOCK(inp);
		} else if (!(inp->inp_vflag & INP_DROPPED) &&
		    !(inp->inp_socket->so_options & SO_ACCEPTCONN)) {
			tp = intotcpcb(inp);
			tcp_drop(tp, ECONNABORTED);
		} else
			INP_UNLOCK(inp);
	} else
		error = ESRCH;
	INP_INFO_WUNLOCK(&tcbinfo);
	return (error);
}
SYSCTL_PROC(_net_inet_tcp, TCPCTL_DROP, drop,
    CTLTYPE_STRUCT|CTLFLAG_WR|CTLFLAG_SKIP, NULL,
    0, sysctl_drop, "", "Drop TCP connection");
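
/*
 * Usage note (a sketch, not part of this file): the tcpdrop(8) utility
 * drives this handler from userland by writing the local and foreign
 * sockaddr pair into net.inet.tcp.drop, e.g.
 *
 *	tcpdrop 192.0.2.1 12345 198.51.100.2 80
 *
 * with the local address/port first (addresses here are illustrative).
 */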