2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3 * The Regents of the University of California. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include "opt_compat.h"
37 #include "opt_inet6.h"
38 #include "opt_ipsec.h"
39 #include "opt_tcpdebug.h"
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/callout.h>
44 #include <sys/hhook.h>
45 #include <sys/kernel.h>
46 #include <sys/khelp.h>
47 #include <sys/sysctl.h>
49 #include <sys/malloc.h>
52 #include <sys/domain.h>
56 #include <sys/socket.h>
57 #include <sys/socketvar.h>
58 #include <sys/protosw.h>
59 #include <sys/random.h>
63 #include <net/route.h>
67 #include <netinet/cc.h>
68 #include <netinet/in.h>
69 #include <netinet/in_systm.h>
70 #include <netinet/ip.h>
72 #include <netinet/ip6.h>
74 #include <netinet/in_pcb.h>
76 #include <netinet6/in6_pcb.h>
78 #include <netinet/in_var.h>
79 #include <netinet/ip_var.h>
81 #include <netinet6/ip6_var.h>
82 #include <netinet6/scope6_var.h>
83 #include <netinet6/nd6.h>
85 #include <netinet/ip_icmp.h>
86 #include <netinet/tcp_fsm.h>
87 #include <netinet/tcp_seq.h>
88 #include <netinet/tcp_timer.h>
89 #include <netinet/tcp_var.h>
90 #include <netinet/tcp_syncache.h>
91 #include <netinet/tcp_offload.h>
93 #include <netinet6/tcp6_var.h>
95 #include <netinet/tcpip.h>
97 #include <netinet/tcp_debug.h>
99 #include <netinet6/ip6protosw.h>
102 #include <netipsec/ipsec.h>
103 #include <netipsec/xform.h>
105 #include <netipsec/ipsec6.h>
107 #include <netipsec/key.h>
108 #include <sys/syslog.h>
111 #include <machine/in_cksum.h>
114 #include <security/mac/mac_framework.h>
116 VNET_DEFINE(int, tcp_mssdflt) = TCP_MSS;
118 VNET_DEFINE(int, tcp_v6mssdflt) = TCP6_MSS;
122 sysctl_net_inet_tcp_mss_check(SYSCTL_HANDLER_ARGS)
127 error = sysctl_handle_int(oidp, &new, 0, req);
128 if (error == 0 && req->newptr) {
129 if (new < TCP_MINMSS)
137 SYSCTL_VNET_PROC(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt,
138 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(tcp_mssdflt), 0,
139 &sysctl_net_inet_tcp_mss_check, "I",
140 "Default TCP Maximum Segment Size");
144 sysctl_net_inet_tcp_mss_v6_check(SYSCTL_HANDLER_ARGS)
148 new = V_tcp_v6mssdflt;
149 error = sysctl_handle_int(oidp, &new, 0, req);
150 if (error == 0 && req->newptr) {
151 if (new < TCP_MINMSS)
154 V_tcp_v6mssdflt = new;
159 SYSCTL_VNET_PROC(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
160 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(tcp_v6mssdflt), 0,
161 &sysctl_net_inet_tcp_mss_v6_check, "I",
162 "Default TCP Maximum Segment Size for IPv6");
166 vnet_sysctl_msec_to_ticks(SYSCTL_HANDLER_ARGS)
169 VNET_SYSCTL_ARG(req, arg1);
170 return (sysctl_msec_to_ticks(oidp, arg1, arg2, req));
174 * Minimum MSS we accept and use. This prevents DoS attacks where
175 * we are forced to a ridiculously low MSS like 20 and send hundreds
176 * of packets instead of one. The effect scales with the available
177 * bandwidth and quickly saturates the CPU and network interface
178 * with packet generation and sending. Set to zero to disable MINMSS
179 * checking. This setting prevents us from sending packets that are too small.
181 VNET_DEFINE(int, tcp_minmss) = TCP_MINMSS;
182 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
183 &VNET_NAME(tcp_minmss), 0,
184 "Minmum TCP Maximum Segment Size");
186 VNET_DEFINE(int, tcp_do_rfc1323) = 1;
187 SYSCTL_VNET_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
188 &VNET_NAME(tcp_do_rfc1323), 0,
189 "Enable rfc1323 (high performance TCP) extensions");
191 static int tcp_log_debug = 0;
192 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_debug, CTLFLAG_RW,
193 &tcp_log_debug, 0, "Log errors caused by incoming TCP segments");
195 static int tcp_tcbhashsize = 0;
196 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN,
197 &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
199 static int do_tcpdrain = 1;
200 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
201 "Enable tcp_drain routine for extra help when low on mbufs");
203 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
204 &VNET_NAME(tcbinfo.ipi_count), 0, "Number of active PCBs");
206 static VNET_DEFINE(int, icmp_may_rst) = 1;
207 #define V_icmp_may_rst VNET(icmp_may_rst)
208 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW,
209 &VNET_NAME(icmp_may_rst), 0,
210 "Certain ICMP unreachable messages may abort connections in SYN_SENT");
212 static VNET_DEFINE(int, tcp_isn_reseed_interval) = 0;
213 #define V_tcp_isn_reseed_interval VNET(tcp_isn_reseed_interval)
214 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
215 &VNET_NAME(tcp_isn_reseed_interval), 0,
216 "Seconds between reseeding of ISN secret");
219 * TCP bandwidth limiting sysctls. Note that the default lower bound of
220 * 1024 exists only for debugging. A good production default would be
221 * something like 6100.
223 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, inflight, CTLFLAG_RW, 0,
224 "TCP inflight data limiting");
226 static VNET_DEFINE(int, tcp_inflight_enable) = 0;
227 #define V_tcp_inflight_enable VNET(tcp_inflight_enable)
228 SYSCTL_VNET_INT(_net_inet_tcp_inflight, OID_AUTO, enable, CTLFLAG_RW,
229 &VNET_NAME(tcp_inflight_enable), 0,
230 "Enable automatic TCP inflight data limiting");
232 static int tcp_inflight_debug = 0;
233 SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, debug, CTLFLAG_RW,
234 &tcp_inflight_debug, 0,
235 "Debug TCP inflight calculations");
237 static VNET_DEFINE(int, tcp_inflight_rttthresh);
238 #define V_tcp_inflight_rttthresh VNET(tcp_inflight_rttthresh)
239 SYSCTL_VNET_PROC(_net_inet_tcp_inflight, OID_AUTO, rttthresh,
240 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(tcp_inflight_rttthresh), 0,
241 vnet_sysctl_msec_to_ticks, "I",
242 "RTT threshold below which inflight will deactivate itself");
244 static VNET_DEFINE(int, tcp_inflight_min) = 6144;
245 #define V_tcp_inflight_min VNET(tcp_inflight_min)
246 SYSCTL_VNET_INT(_net_inet_tcp_inflight, OID_AUTO, min, CTLFLAG_RW,
247 &VNET_NAME(tcp_inflight_min), 0,
248 "Lower-bound for TCP inflight window");
250 static VNET_DEFINE(int, tcp_inflight_max) = TCP_MAXWIN << TCP_MAX_WINSHIFT;
251 #define V_tcp_inflight_max VNET(tcp_inflight_max)
252 SYSCTL_VNET_INT(_net_inet_tcp_inflight, OID_AUTO, max, CTLFLAG_RW,
253 &VNET_NAME(tcp_inflight_max), 0,
254 "Upper-bound for TCP inflight window");
256 static VNET_DEFINE(int, tcp_inflight_stab) = 20;
257 #define V_tcp_inflight_stab VNET(tcp_inflight_stab)
258 SYSCTL_VNET_INT(_net_inet_tcp_inflight, OID_AUTO, stab, CTLFLAG_RW,
259 &VNET_NAME(tcp_inflight_stab), 0,
260 "Inflight Algorithm Stabilization 20 = 2 packets");
263 static int tcp_sig_checksigs = 1;
264 SYSCTL_INT(_net_inet_tcp, OID_AUTO, signature_verify_input, CTLFLAG_RW,
265 &tcp_sig_checksigs, 0, "Verify RFC2385 digests on inbound traffic");
268 VNET_DEFINE(uma_zone_t, sack_hole_zone);
269 #define V_sack_hole_zone VNET(sack_hole_zone)
271 VNET_DEFINE(struct hhook_head *, tcp_hhh[HHOOK_TCP_LAST+1]);
273 static struct inpcb *tcp_notify(struct inpcb *, int);
274 static void tcp_isn_tick(void *);
275 static char * tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th,
276 void *ip4hdr, const void *ip6hdr);
279 * Target size of TCP PCB hash tables. Must be a power of two.
281 * Note that this can be overridden by the kernel environment
282 * variable net.inet.tcp.tcbhashsize
285 #define TCBHASHSIZE 512
290 * Callouts should be moved into struct tcp directly. They are currently
291 * separate because the tcpcb structure is exported to userland for sysctl
292 * parsing purposes, which do not know about callouts.
301 static VNET_DEFINE(uma_zone_t, tcpcb_zone);
302 #define V_tcpcb_zone VNET(tcpcb_zone)
304 MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers");
305 struct callout isn_callout;
306 static struct mtx isn_mtx;
308 #define ISN_LOCK_INIT() mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF)
309 #define ISN_LOCK() mtx_lock(&isn_mtx)
310 #define ISN_UNLOCK() mtx_unlock(&isn_mtx)
313 * TCP initialization.
316 tcp_zone_change(void *tag)
319 uma_zone_set_max(V_tcbinfo.ipi_zone, maxsockets);
320 uma_zone_set_max(V_tcpcb_zone, maxsockets);
321 tcp_tw_zone_change();
325 tcp_inpcb_init(void *mem, int size, int flags)
327 struct inpcb *inp = mem;
329 INP_LOCK_INIT(inp, "inp", "tcpinp");
338 INP_INFO_LOCK_INIT(&V_tcbinfo, "tcp");
341 V_tcbinfo.ipi_vnet = curvnet;
343 V_tcbinfo.ipi_listhead = &V_tcb;
345 if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN,
346 &V_tcp_hhh[HHOOK_TCP_EST_IN], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
347 printf("%s: WARNING: unable to register helper hook\n", __func__);
348 if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT,
349 &V_tcp_hhh[HHOOK_TCP_EST_OUT], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
350 printf("%s: WARNING: unable to register helper hook\n", __func__);
352 hashsize = TCBHASHSIZE;
353 TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
354 if (!powerof2(hashsize)) {
355 printf("WARNING: TCB hash size not a power of 2\n");
356 hashsize = 512; /* safe default */
358 V_tcbinfo.ipi_hashbase = hashinit(hashsize, M_PCB,
359 &V_tcbinfo.ipi_hashmask);
360 V_tcbinfo.ipi_porthashbase = hashinit(hashsize, M_PCB,
361 &V_tcbinfo.ipi_porthashmask);
362 V_tcbinfo.ipi_zone = uma_zcreate("tcp_inpcb", sizeof(struct inpcb),
363 NULL, NULL, tcp_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
364 uma_zone_set_max(V_tcbinfo.ipi_zone, maxsockets);
365 V_tcp_inflight_rttthresh = TCPTV_INFLIGHT_RTTTHRESH;
368 * These have to be type stable for the benefit of the timers.
370 V_tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
371 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
372 uma_zone_set_max(V_tcpcb_zone, maxsockets);
379 TUNABLE_INT_FETCH("net.inet.tcp.sack.enable", &V_tcp_do_sack);
380 V_sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
381 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
383 /* Skip initialization of globals for non-default instances. */
384 if (!IS_DEFAULT_VNET(curvnet))
387 /* XXX virtualize those below? */
388 tcp_delacktime = TCPTV_DELACK;
389 tcp_keepinit = TCPTV_KEEP_INIT;
390 tcp_keepidle = TCPTV_KEEP_IDLE;
391 tcp_keepintvl = TCPTV_KEEPINTVL;
392 tcp_maxpersistidle = TCPTV_KEEP_IDLE;
394 tcp_rexmit_min = TCPTV_MIN;
395 if (tcp_rexmit_min < 1)
397 tcp_rexmit_slop = TCPTV_CPU_VAR;
398 tcp_finwait2_timeout = TCPTV_FINWAIT2_TIMEOUT;
399 tcp_tcbhashsize = hashsize;
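/*
 * Make sure max_protohdr leaves room for a minimal TCP/IP header: an
 * IPv6 header plus TCP header when INET6 is configured, otherwise a
 * combined IPv4 tcpiphdr.
 */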
402 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
404 #define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
406 if (max_protohdr < TCP_MINPROTOHDR)
407 max_protohdr = TCP_MINPROTOHDR;
408 if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
410 #undef TCP_MINPROTOHDR
413 callout_init(&isn_callout, CALLOUT_MPSAFE);
414 callout_reset(&isn_callout, hz/100, tcp_isn_tick, NULL);
415 EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
416 SHUTDOWN_PRI_DEFAULT);
417 EVENTHANDLER_REGISTER(maxsockets_change, tcp_zone_change, NULL,
418 EVENTHANDLER_PRI_ANY);
431 /* XXX check that hashes are empty! */
432 hashdestroy(V_tcbinfo.ipi_hashbase, M_PCB,
433 V_tcbinfo.ipi_hashmask);
434 hashdestroy(V_tcbinfo.ipi_porthashbase, M_PCB,
435 V_tcbinfo.ipi_porthashmask);
437 uma_zdestroy(V_sack_hole_zone);
438 uma_zdestroy(V_tcpcb_zone);
439 uma_zdestroy(V_tcbinfo.ipi_zone);
441 INP_INFO_LOCK_DESTROY(&V_tcbinfo);
449 callout_stop(&isn_callout);
453 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
454 * tcp_template used to store this data in mbufs, but we now recopy it out
455 * of the tcpcb each time to conserve mbufs.
458 tcpip_fillheaders(struct inpcb *inp, void *ip_ptr, void *tcp_ptr)
460 struct tcphdr *th = (struct tcphdr *)tcp_ptr;
462 INP_WLOCK_ASSERT(inp);
465 if ((inp->inp_vflag & INP_IPV6) != 0) {
468 ip6 = (struct ip6_hdr *)ip_ptr;
469 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
470 (inp->inp_flow & IPV6_FLOWINFO_MASK);
471 ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
472 (IPV6_VERSION & IPV6_VERSION_MASK);
473 ip6->ip6_nxt = IPPROTO_TCP;
474 ip6->ip6_plen = htons(sizeof(struct tcphdr));
475 ip6->ip6_src = inp->in6p_laddr;
476 ip6->ip6_dst = inp->in6p_faddr;
482 ip = (struct ip *)ip_ptr;
483 ip->ip_v = IPVERSION;
485 ip->ip_tos = inp->inp_ip_tos;
489 ip->ip_ttl = inp->inp_ip_ttl;
491 ip->ip_p = IPPROTO_TCP;
492 ip->ip_src = inp->inp_laddr;
493 ip->ip_dst = inp->inp_faddr;
495 th->th_sport = inp->inp_lport;
496 th->th_dport = inp->inp_fport;
504 th->th_sum = 0; /* in_pseudo() is called later for ipv4 */
508 * Create template to be used to send tcp packets on a connection.
509 * Allocates an mbuf and fills in a skeletal tcp/ip header. The only
510 * use for this function is in keepalives, which use tcp_respond.
513 tcpip_maketemplate(struct inpcb *inp)
517 t = malloc(sizeof(*t), M_TEMP, M_NOWAIT);
520 tcpip_fillheaders(inp, (void *)&t->tt_ipgen, (void *)&t->tt_t);
525 * Send a single message to the TCP at address specified by
526 * the given TCP/IP header. If m == NULL, then we make a copy
527 * of the tcpiphdr at ti and send directly to the addressed host.
528 * This is used to force keep alive messages out using the TCP
529 * template for a connection. If flags are given then we send
530 * a message back to the TCP which originated the segment ti,
531 * and discard the mbuf containing it and any other attached mbufs.
533 * In any case the ack and sequence number of the transmitted
534 * segment are as specified by the parameters.
536 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
539 tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
540 tcp_seq ack, tcp_seq seq, int flags)
553 KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));
556 isipv6 = ((struct ip *)ipgen)->ip_v == 6;
563 KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
564 INP_WLOCK_ASSERT(inp);
569 if (!(flags & TH_RST)) {
570 win = sbspace(&inp->inp_socket->so_rcv);
571 if (win > (long)TCP_MAXWIN << tp->rcv_scale)
572 win = (long)TCP_MAXWIN << tp->rcv_scale;
576 m = m_gethdr(M_DONTWAIT, MT_DATA);
580 m->m_data += max_linkhdr;
583 bcopy((caddr_t)ip6, mtod(m, caddr_t),
584 sizeof(struct ip6_hdr));
585 ip6 = mtod(m, struct ip6_hdr *);
586 nth = (struct tcphdr *)(ip6 + 1);
590 bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
591 ip = mtod(m, struct ip *);
592 nth = (struct tcphdr *)(ip + 1);
594 bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
599 * XXX MRT We inherit the FIB, which is lucky.
603 m->m_data = (caddr_t)ipgen;
604 /* m_len is set later */
606 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
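/*
 * When replying within the received mbuf, swap the source and
 * destination addresses and ports below so the segment is sent back to
 * the originator of the segment being responded to.
 */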
609 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
610 nth = (struct tcphdr *)(ip6 + 1);
614 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
615 nth = (struct tcphdr *)(ip + 1);
619 * this is usually a case when an extension header
620 * exists between the IPv6 header and the
623 nth->th_sport = th->th_sport;
624 nth->th_dport = th->th_dport;
626 xchg(nth->th_dport, nth->th_sport, uint16_t);
632 ip6->ip6_vfc = IPV6_VERSION;
633 ip6->ip6_nxt = IPPROTO_TCP;
634 ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
636 tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
640 tlen += sizeof (struct tcpiphdr);
642 ip->ip_ttl = V_ip_defttl;
643 if (V_path_mtu_discovery)
647 m->m_pkthdr.len = tlen;
648 m->m_pkthdr.rcvif = NULL;
652 * Packet is associated with a socket, so allow the
653 * label of the response to reflect the socket label.
655 INP_WLOCK_ASSERT(inp);
656 mac_inpcb_create_mbuf(inp, m);
659 * Packet is not associated with a socket, so possibly
660 * update the label in place.
662 mac_netinet_tcp_reply(m);
665 nth->th_seq = htonl(seq);
666 nth->th_ack = htonl(ack);
668 nth->th_off = sizeof (struct tcphdr) >> 2;
669 nth->th_flags = flags;
671 nth->th_win = htons((u_short) (win >> tp->rcv_scale));
673 nth->th_win = htons((u_short)win);
678 nth->th_sum = in6_cksum(m, IPPROTO_TCP,
679 sizeof(struct ip6_hdr),
680 tlen - sizeof(struct ip6_hdr));
681 ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb :
686 nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
687 htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
688 m->m_pkthdr.csum_flags = CSUM_TCP;
689 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
692 if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
693 tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
697 (void) ip6_output(m, NULL, NULL, ipflags, NULL, NULL, inp);
700 (void) ip_output(m, NULL, NULL, ipflags, NULL, inp);
704 * Create a new TCP control block, making an
705 * empty reassembly queue and hooking it to the argument
706 * protocol control block. The `inp' parameter must have
707 * come from the zone allocator set up in tcp_init().
710 tcp_newtcpcb(struct inpcb *inp)
712 struct tcpcb_mem *tm;
715 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
718 tm = uma_zalloc(V_tcpcb_zone, M_NOWAIT | M_ZERO);
723 /* Initialise cc_var struct for this tcpcb. */
725 tp->ccv->type = IPPROTO_TCP;
726 tp->ccv->ccvc.tcp = tp;
729 * Use the current system default CC algorithm.
732 KASSERT(!STAILQ_EMPTY(&cc_list), ("cc_list is empty!"));
733 CC_ALGO(tp) = CC_DEFAULT();
736 if (CC_ALGO(tp)->cb_init != NULL)
737 if (CC_ALGO(tp)->cb_init(tp->ccv) > 0) {
738 uma_zfree(V_tcpcb_zone, tm);
743 if (khelp_init_osd(HELPER_CLASS_TCP, tp->osd)) {
744 uma_zfree(V_tcpcb_zone, tm);
749 tp->t_vnet = inp->inp_vnet;
751 tp->t_timers = &tm->tt;
752 /* LIST_INIT(&tp->t_segq); */ /* XXX covered by M_ZERO */
753 tp->t_maxseg = tp->t_maxopd =
755 isipv6 ? V_tcp_v6mssdflt :
759 /* Set up our timeouts. */
760 callout_init(&tp->t_timers->tt_rexmt, CALLOUT_MPSAFE);
761 callout_init(&tp->t_timers->tt_persist, CALLOUT_MPSAFE);
762 callout_init(&tp->t_timers->tt_keep, CALLOUT_MPSAFE);
763 callout_init(&tp->t_timers->tt_2msl, CALLOUT_MPSAFE);
764 callout_init(&tp->t_timers->tt_delack, CALLOUT_MPSAFE);
766 if (V_tcp_do_rfc1323)
767 tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
769 tp->t_flags |= TF_SACK_PERMIT;
770 TAILQ_INIT(&tp->snd_holes);
771 tp->t_inpcb = inp; /* XXX */
773 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
774 * rtt estimate. Set rttvar so that srtt + 4 * rttvar gives
775 * reasonable initial retransmit time.
777 tp->t_srtt = TCPTV_SRTTBASE;
778 tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
779 tp->t_rttmin = tcp_rexmit_min;
780 tp->t_rxtcur = TCPTV_RTOBASE;
781 tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
782 tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
783 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
784 tp->t_rcvtime = ticks;
785 tp->t_bw_rtttime = ticks;
787 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
788 * because the socket may be bound to an IPv6 wildcard address,
789 * which may match an IPv4-mapped IPv6 address.
791 inp->inp_ip_ttl = V_ip_defttl;
793 return (tp); /* XXX */
797 * Switch the congestion control algorithm back to NewReno for any active
798 * control blocks using an algorithm which is about to go away.
799 * This ensures the CC framework can allow the unload to proceed without leaving
800 * any dangling pointers which would trigger a panic.
801 * Returning non-zero would inform the CC framework that something went wrong
802 * and it would be unsafe to allow the unload to proceed. However, there is no
803 * way for this to occur with this implementation so we always return zero.
806 tcp_ccalgounload(struct cc_algo *unload_algo)
808 struct cc_algo *tmpalgo;
811 VNET_ITERATOR_DECL(vnet_iter);
814 * Check all active control blocks across all network stacks and change
815 * any that are using "unload_algo" back to NewReno. If "unload_algo"
816 * requires cleanup code to be run, call it.
819 VNET_FOREACH(vnet_iter) {
820 CURVNET_SET(vnet_iter);
821 INP_INFO_RLOCK(&V_tcbinfo);
823 * New connections already part way through being initialised
824 * with the CC algo we're removing will not race with this code
825 * because the INP_INFO_WLOCK is held during initialisation. We
826 * therefore don't enter the loop below until the connection
827 * list has stabilised.
829 LIST_FOREACH(inp, &V_tcb, inp_list) {
831 /* Important to skip tcptw structs. */
832 if (!(inp->inp_flags & INP_TIMEWAIT) &&
833 (tp = intotcpcb(inp)) != NULL) {
835 * By holding INP_WLOCK here, we are assured
836 * that the connection is not currently
837 * executing inside the CC module's functions
838 * i.e. it is safe to make the switch back to
841 if (CC_ALGO(tp) == unload_algo) {
842 tmpalgo = CC_ALGO(tp);
843 /* NewReno does not require any init. */
844 CC_ALGO(tp) = &newreno_cc_algo;
845 if (tmpalgo->cb_destroy != NULL)
846 tmpalgo->cb_destroy(tp->ccv);
851 INP_INFO_RUNLOCK(&V_tcbinfo);
860 * Drop a TCP connection, reporting
861 * the specified error. If connection is synchronized,
862 * then send a RST to peer.
865 tcp_drop(struct tcpcb *tp, int errno)
867 struct socket *so = tp->t_inpcb->inp_socket;
869 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
870 INP_WLOCK_ASSERT(tp->t_inpcb);
872 if (TCPS_HAVERCVDSYN(tp->t_state)) {
873 tp->t_state = TCPS_CLOSED;
874 (void) tcp_output_reset(tp);
875 TCPSTAT_INC(tcps_drops);
877 TCPSTAT_INC(tcps_conndrops);
878 if (errno == ETIMEDOUT && tp->t_softerror)
879 errno = tp->t_softerror;
880 so->so_error = errno;
881 return (tcp_close(tp));
885 tcp_discardcb(struct tcpcb *tp)
887 struct inpcb *inp = tp->t_inpcb;
888 struct socket *so = inp->inp_socket;
890 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
893 INP_WLOCK_ASSERT(inp);
896 * Make sure that all of our timers are stopped before we
899 callout_stop(&tp->t_timers->tt_rexmt);
900 callout_stop(&tp->t_timers->tt_persist);
901 callout_stop(&tp->t_timers->tt_keep);
902 callout_stop(&tp->t_timers->tt_2msl);
903 callout_stop(&tp->t_timers->tt_delack);
906 * If we got enough samples through the srtt filter,
907 * save the rtt and rttvar in the routing entry.
908 * 'Enough' is arbitrarily defined as 4 rtt samples.
909 * 4 samples is enough for the srtt filter to converge
910 * to within a reasonable percentage of the correct value; fewer samples
911 * and we could save a bogus rtt. The danger is not high
912 * as tcp quickly recovers from everything.
913 * XXX: Works very well but needs some more statistics!
915 if (tp->t_rttupdated >= 4) {
916 struct hc_metrics_lite metrics;
919 bzero(&metrics, sizeof(metrics));
921 * Update the ssthresh always when the conditions below
922 * are satisfied. This gives us better new start value
923 * for the congestion avoidance for new connections.
924 * ssthresh is only set if packet loss occurred on a session.
926 * XXXRW: 'so' may be NULL here, and/or socket buffer may be
927 * being torn down. Ideally this code would not use 'so'.
929 ssthresh = tp->snd_ssthresh;
930 if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
932 * convert the limit from user data bytes to
933 * packets then to packet data bytes.
935 ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
938 ssthresh *= (u_long)(tp->t_maxseg +
940 (isipv6 ? sizeof (struct ip6_hdr) +
941 sizeof (struct tcphdr) :
943 sizeof (struct tcpiphdr)
950 metrics.rmx_ssthresh = ssthresh;
952 metrics.rmx_rtt = tp->t_srtt;
953 metrics.rmx_rttvar = tp->t_rttvar;
954 /* XXX: This wraps if the pipe is more than 4 Gbit per second */
955 metrics.rmx_bandwidth = tp->snd_bandwidth;
956 metrics.rmx_cwnd = tp->snd_cwnd;
957 metrics.rmx_sendpipe = 0;
958 metrics.rmx_recvpipe = 0;
960 tcp_hc_update(&inp->inp_inc, &metrics);
963 /* free the reassembly queue, if any */
965 /* Disconnect offload device, if any. */
966 tcp_offload_detach(tp);
968 tcp_free_sackholes(tp);
970 /* Allow the CC algorithm to clean up after itself. */
971 if (CC_ALGO(tp)->cb_destroy != NULL)
972 CC_ALGO(tp)->cb_destroy(tp->ccv);
974 khelp_destroy_osd(tp->osd);
977 inp->inp_ppcb = NULL;
979 uma_zfree(V_tcpcb_zone, tp);
983 * Attempt to close a TCP control block, marking it as dropped, and freeing
984 * the socket if we hold the only reference.
987 tcp_close(struct tcpcb *tp)
989 struct inpcb *inp = tp->t_inpcb;
992 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
993 INP_WLOCK_ASSERT(inp);
995 /* Notify any offload devices of listener close */
996 if (tp->t_state == TCPS_LISTEN)
997 tcp_offload_listen_close(tp);
999 TCPSTAT_INC(tcps_closed);
1000 KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL"));
1001 so = inp->inp_socket;
1002 soisdisconnected(so);
1003 if (inp->inp_flags & INP_SOCKREF) {
1004 KASSERT(so->so_state & SS_PROTOREF,
1005 ("tcp_close: !SS_PROTOREF"));
1006 inp->inp_flags &= ~INP_SOCKREF;
1010 so->so_state &= ~SS_PROTOREF;
1020 VNET_ITERATOR_DECL(vnet_iter);
1025 VNET_LIST_RLOCK_NOSLEEP();
1026 VNET_FOREACH(vnet_iter) {
1027 CURVNET_SET(vnet_iter);
1032 * Walk the tcpbs, if existing, and flush the reassembly queue,
1033 * if there is one...
1034 * XXX: The "Net/3" implementation doesn't imply that the TCP
1035 * reassembly queue should be flushed, but in a situation
1036 * where we're really low on mbufs, this is potentially
1039 INP_INFO_RLOCK(&V_tcbinfo);
1040 LIST_FOREACH(inpb, V_tcbinfo.ipi_listhead, inp_list) {
1041 if (inpb->inp_flags & INP_TIMEWAIT)
1044 if ((tcpb = intotcpcb(inpb)) != NULL) {
1045 tcp_reass_flush(tcpb);
1046 tcp_clean_sackreport(tcpb);
1050 INP_INFO_RUNLOCK(&V_tcbinfo);
1053 VNET_LIST_RUNLOCK_NOSLEEP();
1057 * Notify a tcp user of an asynchronous error;
1058 * store error as soft error, but wake up user
1059 * (for now, won't do anything until can select for soft error).
1061 * Do not wake up user since there currently is no mechanism for
1062 * reporting soft errors (yet - a kqueue filter may be added).
1064 static struct inpcb *
1065 tcp_notify(struct inpcb *inp, int error)
1069 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1070 INP_WLOCK_ASSERT(inp);
1072 if ((inp->inp_flags & INP_TIMEWAIT) ||
1073 (inp->inp_flags & INP_DROPPED))
1076 tp = intotcpcb(inp);
1077 KASSERT(tp != NULL, ("tcp_notify: tp == NULL"));
1080 * Ignore some errors if we are hooked up.
1081 * If connection hasn't completed, has retransmitted several times,
1082 * and receives a second error, give up now. This is better
1083 * than waiting a long time to establish a connection that
1084 * can never complete.
1086 if (tp->t_state == TCPS_ESTABLISHED &&
1087 (error == EHOSTUNREACH || error == ENETUNREACH ||
1088 error == EHOSTDOWN)) {
1090 } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
1092 tp = tcp_drop(tp, error);
1098 tp->t_softerror = error;
1102 wakeup( &so->so_timeo);
1109 tcp_pcblist(SYSCTL_HANDLER_ARGS)
1111 int error, i, m, n, pcb_count;
1112 struct inpcb *inp, **inp_list;
1117 * The process of preparing the TCB list is too time-consuming and
1118 * resource-intensive to repeat twice on every request.
1120 if (req->oldptr == NULL) {
1121 n = V_tcbinfo.ipi_count + syncache_pcbcount();
1122 n += imax(n / 8, 10);
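/*
 * Pad the estimated count so that connections created while the caller
 * sizes and allocates its buffer still fit; the slack (n / 8, at least
 * 10 entries) is a heuristic.
 */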
1123 req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xtcpcb);
1127 if (req->newptr != NULL)
1131 * OK, now we're committed to doing something.
1133 INP_INFO_RLOCK(&V_tcbinfo);
1134 gencnt = V_tcbinfo.ipi_gencnt;
1135 n = V_tcbinfo.ipi_count;
1136 INP_INFO_RUNLOCK(&V_tcbinfo);
1138 m = syncache_pcbcount();
1140 error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
1141 + (n + m) * sizeof(struct xtcpcb));
1145 xig.xig_len = sizeof xig;
1146 xig.xig_count = n + m;
1147 xig.xig_gen = gencnt;
1148 xig.xig_sogen = so_gencnt;
1149 error = SYSCTL_OUT(req, &xig, sizeof xig);
1153 error = syncache_pcblist(req, m, &pcb_count);
1157 inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
1158 if (inp_list == NULL)
1161 INP_INFO_RLOCK(&V_tcbinfo);
1162 for (inp = LIST_FIRST(V_tcbinfo.ipi_listhead), i = 0;
1163 inp != NULL && i < n; inp = LIST_NEXT(inp, inp_list)) {
1165 if (inp->inp_gencnt <= gencnt) {
1167 * XXX: This use of cr_cansee(), introduced with
1168 * TCP state changes, is not quite right, but for
1169 * now, better than nothing.
1171 if (inp->inp_flags & INP_TIMEWAIT) {
1172 if (intotw(inp) != NULL)
1173 error = cr_cansee(req->td->td_ucred,
1174 intotw(inp)->tw_cred);
1176 error = EINVAL; /* Skip this inp. */
1178 error = cr_canseeinpcb(req->td->td_ucred, inp);
1181 inp_list[i++] = inp;
1186 INP_INFO_RUNLOCK(&V_tcbinfo);
1190 for (i = 0; i < n; i++) {
1193 if (inp->inp_gencnt <= gencnt) {
1197 bzero(&xt, sizeof(xt));
1198 xt.xt_len = sizeof xt;
1199 /* XXX should avoid extra copy */
1200 bcopy(inp, &xt.xt_inp, sizeof *inp);
1201 inp_ppcb = inp->inp_ppcb;
1202 if (inp_ppcb == NULL)
1203 bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
1204 else if (inp->inp_flags & INP_TIMEWAIT) {
1205 bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
1206 xt.xt_tp.t_state = TCPS_TIME_WAIT;
1208 bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
1209 if (inp->inp_socket != NULL)
1210 sotoxsocket(inp->inp_socket, &xt.xt_socket);
1212 bzero(&xt.xt_socket, sizeof xt.xt_socket);
1213 xt.xt_socket.xso_protocol = IPPROTO_TCP;
1215 xt.xt_inp.inp_gencnt = inp->inp_gencnt;
1217 error = SYSCTL_OUT(req, &xt, sizeof xt);
1221 INP_INFO_WLOCK(&V_tcbinfo);
1222 for (i = 0; i < n; i++) {
1225 if (!in_pcbrele(inp))
1228 INP_INFO_WUNLOCK(&V_tcbinfo);
1232 * Give the user an updated idea of our state.
1233 * If the generation differs from what we told
1234 * her before, she knows that something happened
1235 * while we were processing this request, and it
1236 * might be necessary to retry.
1238 INP_INFO_RLOCK(&V_tcbinfo);
1239 xig.xig_gen = V_tcbinfo.ipi_gencnt;
1240 xig.xig_sogen = so_gencnt;
1241 xig.xig_count = V_tcbinfo.ipi_count + pcb_count;
1242 INP_INFO_RUNLOCK(&V_tcbinfo);
1243 error = SYSCTL_OUT(req, &xig, sizeof xig);
1245 free(inp_list, M_TEMP);
1249 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist,
1250 CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
1251 tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
1254 tcp_getcred(SYSCTL_HANDLER_ARGS)
1257 struct sockaddr_in addrs[2];
1261 error = priv_check(req->td, PRIV_NETINET_GETCRED);
1264 error = SYSCTL_IN(req, addrs, sizeof(addrs));
1267 INP_INFO_RLOCK(&V_tcbinfo);
1268 inp = in_pcblookup_hash(&V_tcbinfo, addrs[1].sin_addr,
1269 addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
1272 INP_INFO_RUNLOCK(&V_tcbinfo);
1273 if (inp->inp_socket == NULL)
1276 error = cr_canseeinpcb(req->td->td_ucred, inp);
1278 cru2x(inp->inp_cred, &xuc);
1281 INP_INFO_RUNLOCK(&V_tcbinfo);
1285 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
1289 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
1290 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
1291 tcp_getcred, "S,xucred", "Get the xucred of a TCP connection");
1295 tcp6_getcred(SYSCTL_HANDLER_ARGS)
1298 struct sockaddr_in6 addrs[2];
1300 int error, mapped = 0;
1302 error = priv_check(req->td, PRIV_NETINET_GETCRED);
1305 error = SYSCTL_IN(req, addrs, sizeof(addrs));
1308 if ((error = sa6_embedscope(&addrs[0], V_ip6_use_defzone)) != 0 ||
1309 (error = sa6_embedscope(&addrs[1], V_ip6_use_defzone)) != 0) {
1312 if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
1313 if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
1319 INP_INFO_RLOCK(&V_tcbinfo);
1321 inp = in_pcblookup_hash(&V_tcbinfo,
1322 *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
1324 *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
1328 inp = in6_pcblookup_hash(&V_tcbinfo,
1329 &addrs[1].sin6_addr, addrs[1].sin6_port,
1330 &addrs[0].sin6_addr, addrs[0].sin6_port, 0, NULL);
1333 INP_INFO_RUNLOCK(&V_tcbinfo);
1334 if (inp->inp_socket == NULL)
1337 error = cr_canseeinpcb(req->td->td_ucred, inp);
1339 cru2x(inp->inp_cred, &xuc);
1342 INP_INFO_RUNLOCK(&V_tcbinfo);
1346 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
1350 SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
1351 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
1352 tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection");
1357 tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
1359 struct ip *ip = vip;
1361 struct in_addr faddr;
1364 struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
1366 struct in_conninfo inc;
1367 tcp_seq icmp_tcp_seq;
1370 faddr = ((struct sockaddr_in *)sa)->sin_addr;
1371 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
1374 if (cmd == PRC_MSGSIZE)
1375 notify = tcp_mtudisc;
1376 else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
1377 cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
1378 notify = tcp_drop_syn_sent;
1380 * Redirects don't need to be handled up here.
1382 else if (PRC_IS_REDIRECT(cmd))
385 * Source quench is deprecated.
1387 else if (cmd == PRC_QUENCH)
1390 * Hostdead is ugly because it goes linearly through all PCBs.
1391 * XXX: We never get this from ICMP, otherwise it would make an
1392 * excellent DoS attack on machines with many connections.
1394 else if (cmd == PRC_HOSTDEAD)
1396 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
1399 icp = (struct icmp *)((caddr_t)ip
1400 - offsetof(struct icmp, icmp_ip));
1401 th = (struct tcphdr *)((caddr_t)ip
1402 + (ip->ip_hl << 2));
1403 INP_INFO_WLOCK(&V_tcbinfo);
1404 inp = in_pcblookup_hash(&V_tcbinfo, faddr, th->th_dport,
1405 ip->ip_src, th->th_sport, 0, NULL);
1408 if (!(inp->inp_flags & INP_TIMEWAIT) &&
1409 !(inp->inp_flags & INP_DROPPED) &&
1410 !(inp->inp_socket == NULL)) {
1411 icmp_tcp_seq = htonl(th->th_seq);
1412 tp = intotcpcb(inp);
1413 if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
1414 SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
1415 if (cmd == PRC_MSGSIZE) {
1418 * If we got a needfrag set the MTU
1419 * in the route to the suggested new
1420 * value (if given) and then notify.
1422 bzero(&inc, sizeof(inc));
1423 inc.inc_faddr = faddr;
1425 inp->inp_inc.inc_fibnum;
1427 mtu = ntohs(icp->icmp_nextmtu);
1429 * If no alternative MTU was
1430 * proposed, try the next smaller
1431 * one. ip->ip_len has already
1432 * been swapped in icmp_input().
1435 mtu = ip_next_mtu(ip->ip_len,
1437 if (mtu < V_tcp_minmss
1438 + sizeof(struct tcpiphdr))
1440 + sizeof(struct tcpiphdr);
1442 * Only cache the MTU if it
1443 * is smaller than the interface
1444 * or route MTU. tcp_mtudisc()
1445 * will do right thing by itself.
1447 if (mtu <= tcp_maxmtu(&inc, NULL))
1448 tcp_hc_updatemtu(&inc, mtu);
1451 inp = (*notify)(inp, inetctlerrmap[cmd]);
1457 bzero(&inc, sizeof(inc));
1458 inc.inc_fport = th->th_dport;
1459 inc.inc_lport = th->th_sport;
1460 inc.inc_faddr = faddr;
1461 inc.inc_laddr = ip->ip_src;
1462 syncache_unreach(&inc, th);
1464 INP_INFO_WUNLOCK(&V_tcbinfo);
1466 in_pcbnotifyall(&V_tcbinfo, faddr, inetctlerrmap[cmd], notify);
1471 tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
1474 struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
1475 struct ip6_hdr *ip6;
1477 struct ip6ctlparam *ip6cp = NULL;
1478 const struct sockaddr_in6 *sa6_src = NULL;
1480 struct tcp_portonly {
1485 if (sa->sa_family != AF_INET6 ||
1486 sa->sa_len != sizeof(struct sockaddr_in6))
1489 if (cmd == PRC_MSGSIZE)
1490 notify = tcp_mtudisc;
1491 else if (!PRC_IS_REDIRECT(cmd) &&
1492 ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
1494 /* Source quench is deprecated. */
1495 else if (cmd == PRC_QUENCH)
1498 /* if the parameter is from icmp6, decode it. */
1500 ip6cp = (struct ip6ctlparam *)d;
1502 ip6 = ip6cp->ip6c_ip6;
1503 off = ip6cp->ip6c_off;
1504 sa6_src = ip6cp->ip6c_src;
1508 off = 0; /* fool gcc */
1513 struct in_conninfo inc;
1515 * XXX: We assume that when IPV6 is non-NULL,
1516 * M and OFF are valid.
1519 /* check if we can safely examine src and dst ports */
1520 if (m->m_pkthdr.len < off + sizeof(*thp))
1523 bzero(&th, sizeof(th));
1524 m_copydata(m, off, sizeof(*thp), (caddr_t)&th);
1526 in6_pcbnotify(&V_tcbinfo, sa, th.th_dport,
1527 (struct sockaddr *)ip6cp->ip6c_src,
1528 th.th_sport, cmd, NULL, notify);
1530 bzero(&inc, sizeof(inc));
1531 inc.inc_fport = th.th_dport;
1532 inc.inc_lport = th.th_sport;
1533 inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
1534 inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
1535 inc.inc_flags |= INC_ISIPV6;
1536 INP_INFO_WLOCK(&V_tcbinfo);
1537 syncache_unreach(&inc, &th);
1538 INP_INFO_WUNLOCK(&V_tcbinfo);
1540 in6_pcbnotify(&V_tcbinfo, sa, 0, (const struct sockaddr *)sa6_src,
1541 0, cmd, NULL, notify);
1547 * Following is where TCP initial sequence number generation occurs.
1549 * There are two places where we must use initial sequence numbers:
1550 * 1. In SYN-ACK packets.
1551 * 2. In SYN packets.
1553 * All ISNs for SYN-ACK packets are generated by the syncache. See
1554 * tcp_syncache.c for details.
1556 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
1557 * depends on this property. In addition, these ISNs should be
1558 * unguessable so as to prevent connection hijacking. To satisfy
1559 * the requirements of this situation, the algorithm outlined in
1560 * RFC 1948 is used, with only small modifications.
1562 * Implementation details:
1564 * Time is based off the system timer, and is corrected so that it
1565 * increases by one megabyte per second. This allows for proper
1566 * recycling on high speed LANs while still leaving over an hour
1569 * As reading the *exact* system time is too expensive to be done
1570 * whenever setting up a TCP connection, we increment the time
1571 * offset in two ways. First, a small random positive increment
1572 * is added to isn_offset for each connection that is set up.
1573 * Second, the function tcp_isn_tick fires once per clock tick
1574 * and increments isn_offset as necessary so that sequence numbers
1575 * are incremented at approximately ISN_BYTES_PER_SECOND. The
1576 * random positive increments serve only to ensure that the same
1577 * exact sequence number is never sent out twice (as could otherwise
1578 * happen when a port is recycled in less than the system tick
1581 * net.inet.tcp.isn_reseed_interval controls the number of seconds
1582 * between seeding of isn_secret. This is normally set to zero,
1583 * as reseeding should not be necessary.
1585 * Locking of the global variables isn_secret, isn_last_reseed, isn_offset,
1586 * isn_offset_old, and isn_ctx is performed using the TCP pcbinfo lock. In
1587 * general, this means holding an exclusive (write) lock.
1590 #define ISN_BYTES_PER_SECOND 1048576
1591 #define ISN_STATIC_INCREMENT 4096
1592 #define ISN_RANDOM_INCREMENT (4096 - 1)
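/*
 * Back-of-the-envelope check on the constants above: tcp_isn_tick()
 * fires roughly 100 times per second and advances the offset by
 * ISN_BYTES_PER_SECOND / 100 (about 10,485) each time, i.e. ~1 Mbyte of
 * sequence space per second overall.  Each new connection additionally
 * bumps the offset by ISN_STATIC_INCREMENT plus a random value in
 * [0, ISN_RANDOM_INCREMENT], for 4096 to 8191 extra.
 */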
1594 static VNET_DEFINE(u_char, isn_secret[32]);
1595 static VNET_DEFINE(int, isn_last_reseed);
1596 static VNET_DEFINE(u_int32_t, isn_offset);
1597 static VNET_DEFINE(u_int32_t, isn_offset_old);
1599 #define V_isn_secret VNET(isn_secret)
1600 #define V_isn_last_reseed VNET(isn_last_reseed)
1601 #define V_isn_offset VNET(isn_offset)
1602 #define V_isn_offset_old VNET(isn_offset_old)
1605 tcp_new_isn(struct tcpcb *tp)
1608 u_int32_t md5_buffer[4];
1611 INP_WLOCK_ASSERT(tp->t_inpcb);
1614 /* Seed if this is the first use, reseed if requested. */
1615 if ((V_isn_last_reseed == 0) || ((V_tcp_isn_reseed_interval > 0) &&
1616 (((u_int)V_isn_last_reseed + (u_int)V_tcp_isn_reseed_interval*hz)
1618 read_random(&V_isn_secret, sizeof(V_isn_secret));
1619 V_isn_last_reseed = ticks;
1622 /* Compute the md5 hash and return the ISN. */
1624 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
1625 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
1627 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
1628 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
1629 sizeof(struct in6_addr));
1630 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
1631 sizeof(struct in6_addr));
1635 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
1636 sizeof(struct in_addr));
1637 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
1638 sizeof(struct in_addr));
1640 MD5Update(&isn_ctx, (u_char *) &V_isn_secret, sizeof(V_isn_secret));
1641 MD5Final((u_char *) &md5_buffer, &isn_ctx);
1642 new_isn = (tcp_seq) md5_buffer[0];
1643 V_isn_offset += ISN_STATIC_INCREMENT +
1644 (arc4random() & ISN_RANDOM_INCREMENT);
1645 new_isn += V_isn_offset;
1651 * Increment the offset to the next ISN_BYTES_PER_SECOND / 100 boundary
1652 * to keep time flowing at a relatively constant rate. If the random
1653 * increments have already pushed us past the projected offset, do nothing.
1656 tcp_isn_tick(void *xtp)
1658 VNET_ITERATOR_DECL(vnet_iter);
1659 u_int32_t projected_offset;
1661 VNET_LIST_RLOCK_NOSLEEP();
1663 VNET_FOREACH(vnet_iter) {
1664 CURVNET_SET(vnet_iter); /* XXX appease INVARIANTS */
1666 V_isn_offset_old + ISN_BYTES_PER_SECOND / 100;
1668 if (SEQ_GT(projected_offset, V_isn_offset))
1669 V_isn_offset = projected_offset;
1671 V_isn_offset_old = V_isn_offset;
1675 VNET_LIST_RUNLOCK_NOSLEEP();
1676 callout_reset(&isn_callout, hz/100, tcp_isn_tick, NULL);
1680 * When a specific ICMP unreachable message is received and the
1681 * connection state is SYN-SENT, drop the connection. This behavior
1682 * is controlled by the icmp_may_rst sysctl.
1685 tcp_drop_syn_sent(struct inpcb *inp, int errno)
1689 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1690 INP_WLOCK_ASSERT(inp);
1692 if ((inp->inp_flags & INP_TIMEWAIT) ||
1693 (inp->inp_flags & INP_DROPPED))
1696 tp = intotcpcb(inp);
1697 if (tp->t_state != TCPS_SYN_SENT)
1700 tp = tcp_drop(tp, errno);
1708 * When `need fragmentation' ICMP is received, update our idea of the MSS
1709 * based on the new value in the route. Also nudge TCP to send something,
1710 * since we know the packet we just sent was dropped.
1711 * This duplicates some code in the tcp_mss() function in tcp_input.c.
1714 tcp_mtudisc(struct inpcb *inp, int errno)
1719 INP_WLOCK_ASSERT(inp);
1720 if ((inp->inp_flags & INP_TIMEWAIT) ||
1721 (inp->inp_flags & INP_DROPPED))
1724 tp = intotcpcb(inp);
1725 KASSERT(tp != NULL, ("tcp_mtudisc: tp == NULL"));
1727 tcp_mss_update(tp, -1, NULL, NULL);
1729 so = inp->inp_socket;
1730 SOCKBUF_LOCK(&so->so_snd);
1731 /* If the mss is larger than the socket buffer, decrease the mss. */
1732 if (so->so_snd.sb_hiwat < tp->t_maxseg)
1733 tp->t_maxseg = so->so_snd.sb_hiwat;
1734 SOCKBUF_UNLOCK(&so->so_snd);
1736 TCPSTAT_INC(tcps_mturesent);
1738 tp->snd_nxt = tp->snd_una;
1739 tcp_free_sackholes(tp);
1740 tp->snd_recover = tp->snd_max;
1741 if (tp->t_flags & TF_SACK_PERMIT)
1742 EXIT_FASTRECOVERY(tp->t_flags);
1743 tcp_output_send(tp);
1748 * Look-up the routing entry to the peer of this inpcb. If no route
1749 * is found and it cannot be allocated, then return 0. This routine
1750 * is called by TCP routines that access the rmx structure and by
1751 * tcp_mss_update to get the peer/interface MTU.
1754 tcp_maxmtu(struct in_conninfo *inc, int *flags)
1757 struct sockaddr_in *dst;
1761 KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));
1763 bzero(&sro, sizeof(sro));
1764 if (inc->inc_faddr.s_addr != INADDR_ANY) {
1765 dst = (struct sockaddr_in *)&sro.ro_dst;
1766 dst->sin_family = AF_INET;
1767 dst->sin_len = sizeof(*dst);
1768 dst->sin_addr = inc->inc_faddr;
1769 in_rtalloc_ign(&sro, 0, inc->inc_fibnum);
1771 if (sro.ro_rt != NULL) {
1772 ifp = sro.ro_rt->rt_ifp;
1773 if (sro.ro_rt->rt_rmx.rmx_mtu == 0)
1774 maxmtu = ifp->if_mtu;
1776 maxmtu = min(sro.ro_rt->rt_rmx.rmx_mtu, ifp->if_mtu);
1778 /* Report additional interface capabilities. */
1779 if (flags != NULL) {
1780 if (ifp->if_capenable & IFCAP_TSO4 &&
1781 ifp->if_hwassist & CSUM_TSO)
1791 tcp_maxmtu6(struct in_conninfo *inc, int *flags)
1793 struct route_in6 sro6;
1797 KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));
1799 bzero(&sro6, sizeof(sro6));
1800 if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
1801 sro6.ro_dst.sin6_family = AF_INET6;
1802 sro6.ro_dst.sin6_len = sizeof(struct sockaddr_in6);
1803 sro6.ro_dst.sin6_addr = inc->inc6_faddr;
1804 in6_rtalloc_ign(&sro6, 0, inc->inc_fibnum);
1806 if (sro6.ro_rt != NULL) {
1807 ifp = sro6.ro_rt->rt_ifp;
1808 if (sro6.ro_rt->rt_rmx.rmx_mtu == 0)
1809 maxmtu = IN6_LINKMTU(sro6.ro_rt->rt_ifp);
1811 maxmtu = min(sro6.ro_rt->rt_rmx.rmx_mtu,
1812 IN6_LINKMTU(sro6.ro_rt->rt_ifp));
1814 /* Report additional interface capabilities. */
1815 if (flags != NULL) {
1816 if (ifp->if_capenable & IFCAP_TSO6 &&
1817 ifp->if_hwassist & CSUM_TSO)
1828 /* compute ESP/AH header size for TCP, including outer IP header. */
1830 ipsec_hdrsiz_tcp(struct tcpcb *tp)
1837 struct ip6_hdr *ip6;
1841 if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
1843 MGETHDR(m, M_DONTWAIT, MT_DATA);
1848 if ((inp->inp_vflag & INP_IPV6) != 0) {
1849 ip6 = mtod(m, struct ip6_hdr *);
1850 th = (struct tcphdr *)(ip6 + 1);
1851 m->m_pkthdr.len = m->m_len =
1852 sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
1853 tcpip_fillheaders(inp, ip6, th);
1854 hdrsiz = ipsec_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
1858 ip = mtod(m, struct ip *);
1859 th = (struct tcphdr *)(ip + 1);
1860 m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
1861 tcpip_fillheaders(inp, ip, th);
1862 hdrsiz = ipsec_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
1871 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
1873 * This code attempts to calculate the bandwidth-delay product as a
1874 * means of determining the optimal window size to maximize bandwidth,
1875 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
1876 * routers. This code also does a fairly good job keeping RTTs in check
1877 * across slow links like modems. We implement an algorithm which is very
1878 * similar to (but not meant to be) TCP/Vegas. The code operates on the
1879 * transmitter side of a TCP connection and so only affects the transmit
1880 * side of the connection.
1882 * BACKGROUND: TCP makes no provision for the management of buffer space
1883 * at the end points or at the intermediate routers and switches. A TCP
1884 * stream, whether using NewReno or not, will eventually buffer as
1885 * many packets as it is able and the only reason this typically works is
1886 * due to the fairly small default buffers made available for a connection
1887 * (typically 16K or 32K). As machines use larger windows and/or window
1888 * scaling it is now fairly easy for even a single TCP connection to blow-out
1889 * all available buffer space not only on the local interface, but on
1890 * intermediate routers and switches as well. NewReno makes a misguided
1891 * attempt to 'solve' this problem by waiting for an actual failure to occur,
1892 * then backing off, then steadily increasing the window again until another
1893 * failure occurs, ad-infinitum. This results in terrible oscillation that
1894 * is only made worse as network loads increase and the idea of intentionally
1895 * blowing out network buffers is, frankly, a terrible way to manage network
1898 * It is far better to limit the transmit window prior to the failure
1899 * condition being achieved. There are two general ways to do this: First
1900 * you can 'scan' through different transmit window sizes and locate the
1901 * point where the RTT stops increasing, indicating that you have filled the
1902 * pipe, then scan backwards until you note that RTT stops decreasing, then
1903 * repeat ad-infinitum. This method works in principle but has severe
1904 * implementation issues due to RTT variances, timer granularity, and
1905 * instability in the algorithm which can lead to many false positives and
1906 * create oscillations as well as interact badly with other TCP streams
1907 * implementing the same algorithm.
1909 * The second method is to limit the window to the bandwidth delay product
1910 * of the link. This is the method we implement. RTT variances and our
1911 * own manipulation of the congestion window, bwnd, can potentially
1912 * destabilize the algorithm. For this reason we have to stabilize the
1913 * elements used to calculate the window. We do this by using the minimum
1914 * observed RTT, the long term average of the observed bandwidth, and
1915 * by adding two segments worth of slop. It isn't perfect but it is able
1916 * to react to changing conditions and gives us a very stable basis on
1917 * which to extend the algorithm.
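/*
 * In rough terms the window computed below is
 *
 *	bwnd = bandwidth * rtt + slop
 *
 * where bandwidth is the long-term average of the observed throughput,
 * rtt is taken as (t_srtt + t_rttbest) / 2, and slop defaults to two
 * maximal segments (tcp_inflight_stab tenths of a segment).
 */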
1920 tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
1926 INP_WLOCK_ASSERT(tp->t_inpcb);
1929 * If inflight_enable is disabled in the middle of a tcp connection,
1930 * make sure snd_bwnd is effectively disabled.
1932 if (V_tcp_inflight_enable == 0 ||
1933 tp->t_rttlow < V_tcp_inflight_rttthresh) {
1934 tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1935 tp->snd_bandwidth = 0;
1940 * Figure out the bandwidth. Due to the tick granularity this
1941 * is a very rough number and it MUST be averaged over a fairly
1942 * long period of time. XXX we need to take into account a link
1943 * that is not using all available bandwidth, but for now our
1944 * slop will ramp us up if this case occurs and the bandwidth later
1947 * Note: if the ticks counter rolls over, 'bw' may wind up negative. We must
1948 * effectively reset t_bw_rtttime for this case.
1951 if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)
1954 bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
1955 (save_ticks - tp->t_bw_rtttime);
1956 tp->t_bw_rtttime = save_ticks;
1957 tp->t_bw_rtseq = ack_seq;
1958 if (tp->t_bw_rtttime == 0 || (int)bw < 0)
1960 bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;
1962 tp->snd_bandwidth = bw;
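/*
 * snd_bandwidth is therefore an exponentially weighted moving average:
 * 15/16 of the previous estimate plus 1/16 of the newest sample, which
 * smooths out the tick-granularity noise described above.
 */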
1965 * Calculate the semi-static bandwidth delay product, plus two maximal
1966 * segments. The additional slop puts us squarely in the sweet
1967 * spot and also handles the bandwidth run-up case and stabilization.
1968 * Without the slop we could be locking ourselves into a lower
1971 * Situations Handled:
1972 * (1) Prevents over-queueing of packets on LANs, especially on
1973 * high speed LANs, allowing larger TCP buffers to be
1974 * specified, and also does a good job preventing
1975 * over-queueing of packets over choke points like modems
1976 * (at least for the transmit side).
1978 * (2) Is able to handle changing network loads (bandwidth
1979 * drops so bwnd drops, bandwidth increases so bwnd
1982 * (3) Theoretically should stabilize in the face of multiple
1983 * connections implementing the same algorithm (this may need
1986 * (4) Stability value (defaults to 20 = 2 maximal packets) can
1987 * be adjusted with a sysctl but typically only needs to be
1988 * on very slow connections. A value no smaller than 5
1989 * should be used, but only reduce this default if you have
1992 #define USERTT ((tp->t_srtt + tp->t_rttbest) / 2)
1993 bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) + V_tcp_inflight_stab * tp->t_maxseg / 10;
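/*
 * Units: bw is in bytes per second; t_srtt and t_rttbest are fixed-point
 * tick counts scaled by TCP_RTT_SHIFT, so dividing by
 * (hz << TCP_RTT_SHIFT) converts the product back to bytes.  The second
 * term adds tcp_inflight_stab tenths of a segment of slop.
 */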
1996 if (tcp_inflight_debug > 0) {
1998 if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
2000 printf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
2009 if ((long)bwnd < V_tcp_inflight_min)
2010 bwnd = V_tcp_inflight_min;
2011 if (bwnd > V_tcp_inflight_max)
2012 bwnd = V_tcp_inflight_max;
2013 if ((long)bwnd < tp->t_maxseg * 2)
2014 bwnd = tp->t_maxseg * 2;
2015 tp->snd_bwnd = bwnd;
2018 #ifdef TCP_SIGNATURE
2020 * Callback function invoked by m_apply() to digest TCP segment data
2021 * contained within an mbuf chain.
2024 tcp_signature_apply(void *fstate, void *data, u_int len)
2027 MD5Update(fstate, (u_char *)data, len);
2032 * Compute TCP-MD5 hash of a TCP segment. (RFC2385)
2035 * m pointer to head of mbuf chain
2037 * len length of TCP segment data, excluding options
2038 * optlen length of TCP segment options
2039 * buf pointer to storage for computed MD5 digest
2040 * direction direction of flow (IPSEC_DIR_INBOUND or OUTBOUND)
2042 * We do this over ip, tcphdr, segment data, and the key in the SADB.
2043 * When called from tcp_input(), we can be sure that th_sum has been
2044 * zeroed out and verified already.
2046 * Return 0 if successful, otherwise return -1.
2048 * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
2049 * search with the destination IP address, and a 'magic SPI' to be
2050 * determined by the application. This is hardcoded elsewhere to 1179
2051 * right now. Another branch of this code exists which uses the SPD to
2052 * specify per-application flows but it is unstable.
2055 tcp_signature_compute(struct mbuf *m, int _unused, int len, int optlen,
2056 u_char *buf, u_int direction)
2058 union sockaddr_union dst;
2059 struct ippseudo ippseudo;
2063 struct ipovly *ipovly;
2064 struct secasvar *sav;
2067 struct ip6_hdr *ip6;
2068 struct in6_addr in6;
2069 char ip6buf[INET6_ADDRSTRLEN];
2075 KASSERT(m != NULL, ("NULL mbuf chain"));
2076 KASSERT(buf != NULL, ("NULL signature pointer"));
2078 /* Extract the destination from the IP header in the mbuf. */
2079 bzero(&dst, sizeof(union sockaddr_union));
2080 ip = mtod(m, struct ip *);
2082 ip6 = NULL; /* Make the compiler happy. */
2086 dst.sa.sa_len = sizeof(struct sockaddr_in);
2087 dst.sa.sa_family = AF_INET;
2088 dst.sin.sin_addr = (direction == IPSEC_DIR_INBOUND) ?
2089 ip->ip_src : ip->ip_dst;
2092 case (IPV6_VERSION >> 4):
2093 ip6 = mtod(m, struct ip6_hdr *);
2094 dst.sa.sa_len = sizeof(struct sockaddr_in6);
2095 dst.sa.sa_family = AF_INET6;
2096 dst.sin6.sin6_addr = (direction == IPSEC_DIR_INBOUND) ?
2097 ip6->ip6_src : ip6->ip6_dst;
2106 /* Look up an SADB entry which matches the address of the peer. */
2107 sav = KEY_ALLOCSA(&dst, IPPROTO_TCP, htonl(TCP_SIG_SPI));
2109 ipseclog((LOG_ERR, "%s: SADB lookup failed for %s\n", __func__,
2110 (ip->ip_v == IPVERSION) ? inet_ntoa(dst.sin.sin_addr) :
2112 (ip->ip_v == (IPV6_VERSION >> 4)) ?
2113 ip6_sprintf(ip6buf, &dst.sin6.sin6_addr) :
2121 * Step 1: Update MD5 hash with IP(v6) pseudo-header.
2123 * XXX The ippseudo header MUST be digested in network byte order,
2124 * or else we'll fail the regression test. Assume all fields we've
2125 * been doing arithmetic on have been in host byte order.
2126 * XXX One cannot depend on ipovly->ih_len here. When called from
2127 * tcp_output(), the underlying ip_len member has not yet been set.
2131 ipovly = (struct ipovly *)ip;
2132 ippseudo.ippseudo_src = ipovly->ih_src;
2133 ippseudo.ippseudo_dst = ipovly->ih_dst;
2134 ippseudo.ippseudo_pad = 0;
2135 ippseudo.ippseudo_p = IPPROTO_TCP;
2136 ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) +
2138 MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));
2140 th = (struct tcphdr *)((u_char *)ip + sizeof(struct ip));
2141 doff = sizeof(struct ip) + sizeof(struct tcphdr) + optlen;
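/*
 * doff is the offset of the TCP segment data within the mbuf chain;
 * m_apply() starts digesting payload bytes there in step 3 below.
 */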
2145 * RFC 2385, 2.0 Proposal
2146 * For IPv6, the pseudo-header is as described in RFC 2460, namely the
2147 * 128-bit source IPv6 address, 128-bit destination IPv6 address, zero-
2148 * extended next header value (to form 32 bits), and 32-bit segment
2150 * Note: Upper-Layer Packet Length comes before Next Header.
2152 case (IPV6_VERSION >> 4):
2154 in6_clearscope(&in6);
2155 MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
2157 in6_clearscope(&in6);
2158 MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
2159 plen = htonl(len + sizeof(struct tcphdr) + optlen);
2160 MD5Update(&ctx, (char *)&plen, sizeof(uint32_t));
2162 MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
2163 MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
2164 MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
2166 MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
2168 th = (struct tcphdr *)((u_char *)ip6 + sizeof(struct ip6_hdr));
2169 doff = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + optlen;
2180 * Step 2: Update MD5 hash with TCP header, excluding options.
2181 * The TCP checksum must be set to zero.
2183 savecsum = th->th_sum;
2185 MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
2186 th->th_sum = savecsum;
2189 * Step 3: Update MD5 hash with TCP segment data.
2190 * Use m_apply() to avoid an early m_pullup().
2193 m_apply(m, doff, len, tcp_signature_apply, &ctx);
2196 * Step 4: Update MD5 hash with shared secret.
2198 MD5Update(&ctx, sav->key_auth->key_data, _KEYLEN(sav->key_auth));
2199 MD5Final(buf, &ctx);
2201 key_sa_recordxfer(sav, m);
2207 * Verify the TCP-MD5 hash of a TCP segment. (RFC2385)
2210 * m pointer to head of mbuf chain
2211 * len length of TCP segment data, excluding options
2212 * optlen length of TCP segment options
2213 * buf pointer to storage for computed MD5 digest
2214 * direction direction of flow (IPSEC_DIR_INBOUND or OUTBOUND)
2216 * Return 1 if successful, otherwise return 0.
2219 tcp_signature_verify(struct mbuf *m, int off0, int tlen, int optlen,
2220 struct tcpopt *to, struct tcphdr *th, u_int tcpbflag)
2222 char tmpdigest[TCP_SIGLEN];
2224 if (tcp_sig_checksigs == 0)
2226 if ((tcpbflag & TF_SIGNATURE) == 0) {
2227 if ((to->to_flags & TOF_SIGNATURE) != 0) {
2230 * If this socket is not expecting a signature but
2231 * the segment contains one, just fail.
2233 TCPSTAT_INC(tcps_sig_err_sigopt);
2234 TCPSTAT_INC(tcps_sig_rcvbadsig);
2238 /* Signature is not expected, and not present in segment. */
2243 * If this socket is expecting a signature but the segment does not
2244 * contain one, just fail.
2246 if ((to->to_flags & TOF_SIGNATURE) == 0) {
2247 TCPSTAT_INC(tcps_sig_err_nosigopt);
2248 TCPSTAT_INC(tcps_sig_rcvbadsig);
2251 if (tcp_signature_compute(m, off0, tlen, optlen, &tmpdigest[0],
2252 IPSEC_DIR_INBOUND) == -1) {
2253 TCPSTAT_INC(tcps_sig_err_buildsig);
2254 TCPSTAT_INC(tcps_sig_rcvbadsig);
2258 if (bcmp(to->to_signature, &tmpdigest[0], TCP_SIGLEN) != 0) {
2259 TCPSTAT_INC(tcps_sig_rcvbadsig);
2262 TCPSTAT_INC(tcps_sig_rcvgoodsig);
2265 #endif /* TCP_SIGNATURE */
2268 sysctl_drop(SYSCTL_HANDLER_ARGS)
2270 /* addrs[0] is a foreign socket, addrs[1] is a local one. */
2271 struct sockaddr_storage addrs[2];
2275 struct sockaddr_in *fin, *lin;
2277 struct sockaddr_in6 *fin6, *lin6;
2288 if (req->oldptr != NULL || req->oldlen != 0)
2290 if (req->newptr == NULL)
2292 if (req->newlen < sizeof(addrs))
2294 error = SYSCTL_IN(req, &addrs, sizeof(addrs));
2298 switch (addrs[0].ss_family) {
2301 fin6 = (struct sockaddr_in6 *)&addrs[0];
2302 lin6 = (struct sockaddr_in6 *)&addrs[1];
2303 if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
2304 lin6->sin6_len != sizeof(struct sockaddr_in6))
2306 if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
2307 if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
2309 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
2310 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
2311 fin = (struct sockaddr_in *)&addrs[0];
2312 lin = (struct sockaddr_in *)&addrs[1];
2315 error = sa6_embedscope(fin6, V_ip6_use_defzone);
2318 error = sa6_embedscope(lin6, V_ip6_use_defzone);
2324 fin = (struct sockaddr_in *)&addrs[0];
2325 lin = (struct sockaddr_in *)&addrs[1];
2326 if (fin->sin_len != sizeof(struct sockaddr_in) ||
2327 lin->sin_len != sizeof(struct sockaddr_in))
2333 INP_INFO_WLOCK(&V_tcbinfo);
2334 switch (addrs[0].ss_family) {
2337 inp = in6_pcblookup_hash(&V_tcbinfo, &fin6->sin6_addr,
2338 fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port, 0,
2343 inp = in_pcblookup_hash(&V_tcbinfo, fin->sin_addr,
2344 fin->sin_port, lin->sin_addr, lin->sin_port, 0, NULL);
2349 if (inp->inp_flags & INP_TIMEWAIT) {
2351 * XXXRW: There currently exists a state where an
2352 * inpcb is present, but its timewait state has been
2353 * discarded. For now, don't allow dropping of this
2361 } else if (!(inp->inp_flags & INP_DROPPED) &&
2362 !(inp->inp_socket->so_options & SO_ACCEPTCONN)) {
2363 tp = intotcpcb(inp);
2364 tp = tcp_drop(tp, ECONNABORTED);
2371 INP_INFO_WUNLOCK(&V_tcbinfo);
2375 SYSCTL_PROC(_net_inet_tcp, TCPCTL_DROP, drop,
2376 CTLTYPE_STRUCT|CTLFLAG_WR|CTLFLAG_SKIP, NULL,
2377 0, sysctl_drop, "", "Drop TCP connection");
2380 * Generate a standardized TCP log line for use throughout the
2381 * tcp subsystem. Memory allocation is done with M_NOWAIT to
2382 * allow use in the interrupt context.
2384 * NB: The caller MUST free(s, M_TCPLOG) the returned string.
2385 * NB: The function may return NULL if memory allocation failed.
2387 * Due to header inclusion and ordering limitations the struct ip
2388 * and ip6_hdr pointers have to be passed as void pointers.
2391 tcp_log_vain(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
2395 /* Is logging enabled? */
2396 if (tcp_log_in_vain == 0)
2399 return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
2403 tcp_log_addrs(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
2407 /* Is logging enabled? */
2408 if (tcp_log_debug == 0)
2411 return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
2415 tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
2422 const struct ip6_hdr *ip6;
2424 ip6 = (const struct ip6_hdr *)ip6hdr;
2426 ip = (struct ip *)ip4hdr;
2429 * The log line looks like this:
2430 * "TCP: [1.2.3.4]:50332 to [1.2.3.4]:80 tcpflags 0x2<SYN>"
2432 size = sizeof("TCP: []:12345 to []:12345 tcpflags 0x2<>") +
2433 sizeof(PRINT_TH_FLAGS) + 1 +
2435 2 * INET6_ADDRSTRLEN;
2437 2 * INET_ADDRSTRLEN;
2440 s = malloc(size, M_TCPLOG, M_ZERO|M_NOWAIT);
2444 strcat(s, "TCP: [");
2447 if (inc && ((inc->inc_flags & INC_ISIPV6) == 0)) {
2448 inet_ntoa_r(inc->inc_faddr, sp);
2450 sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
2452 inet_ntoa_r(inc->inc_laddr, sp);
2454 sprintf(sp, "]:%i", ntohs(inc->inc_lport));
2457 ip6_sprintf(sp, &inc->inc6_faddr);
2459 sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
2461 ip6_sprintf(sp, &inc->inc6_laddr);
2463 sprintf(sp, "]:%i", ntohs(inc->inc_lport));
2464 } else if (ip6 && th) {
2465 ip6_sprintf(sp, &ip6->ip6_src);
2467 sprintf(sp, "]:%i to [", ntohs(th->th_sport));
2469 ip6_sprintf(sp, &ip6->ip6_dst);
2471 sprintf(sp, "]:%i", ntohs(th->th_dport));
2473 } else if (ip && th) {
2474 inet_ntoa_r(ip->ip_src, sp);
2476 sprintf(sp, "]:%i to [", ntohs(th->th_sport));
2478 inet_ntoa_r(ip->ip_dst, sp);
2480 sprintf(sp, "]:%i", ntohs(th->th_dport));
2487 sprintf(sp, " tcpflags 0x%b", th->th_flags, PRINT_TH_FLAGS);
2488 if (*(s + size - 1) != '\0')
2489 panic("%s: string too long", __func__);