/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/eventhandler.h>
#include <sys/hhook.h>
#include <sys/kernel.h>
#include <sys/khelp.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/refcount.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>

#include <net/route.h>
#include <net/if_var.h>

#include <netinet/in.h>
#include <netinet/in_fib.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_var.h>
#include <netinet/icmp6.h>
#include <netinet/ip6.h>
#include <netinet6/in6_fib.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_syncache.h>
#include <netinet/cc/cc.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_pcap.h>
#include <netinet/tcp_debug.h>
#include <netinet6/ip6protosw.h>
#include <netinet/tcp_offload.h>
#include <netipsec/ipsec_support.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>
VNET_DEFINE(int, tcp_mssdflt) = TCP_MSS;
VNET_DEFINE(int, tcp_v6mssdflt) = TCP6_MSS;

struct rwlock tcp_function_lock;
static int
sysctl_net_inet_tcp_mss_check(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_tcp_mssdflt;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if (new < TCP_MINMSS)
			error = EINVAL;
		else
			V_tcp_mssdflt = new;
	}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW, &VNET_NAME(tcp_mssdflt), 0,
    &sysctl_net_inet_tcp_mss_check, "I",
    "Default TCP Maximum Segment Size");
static int
sysctl_net_inet_tcp_mss_v6_check(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_tcp_v6mssdflt;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if (new < TCP_MINMSS)
			error = EINVAL;
		else
			V_tcp_v6mssdflt = new;
	}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW, &VNET_NAME(tcp_v6mssdflt), 0,
    &sysctl_net_inet_tcp_mss_v6_check, "I",
    "Default TCP Maximum Segment Size for IPv6");
/*
 * Minimum MSS we accept and use.  This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one.  The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending.  Set to zero to disable MINMSS
 * checking.  This setting prevents us from sending overly small packets.
 */
VNET_DEFINE(int, tcp_minmss) = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_minmss), 0,
    "Minimum TCP Maximum Segment Size");
VNET_DEFINE(int, tcp_do_rfc1323) = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc1323), 0,
    "Enable rfc1323 (high performance TCP) extensions");

static int	tcp_log_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_debug, CTLFLAG_RW,
    &tcp_log_debug, 0, "Log errors caused by incoming TCP segments");

static int	tcp_tcbhashsize;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

static int	do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_VNET | CTLFLAG_RD,
    &VNET_NAME(tcbinfo.ipi_count), 0, "Number of active PCBs");
static VNET_DEFINE(int, icmp_may_rst) = 1;
#define	V_icmp_may_rst			VNET(icmp_may_rst)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(icmp_may_rst), 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static VNET_DEFINE(int, tcp_isn_reseed_interval) = 0;
#define	V_tcp_isn_reseed_interval	VNET(tcp_isn_reseed_interval)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_isn_reseed_interval), 0,
    "Seconds between reseeding of ISN secret");

static int	tcp_soreceive_stream;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, soreceive_stream, CTLFLAG_RDTUN,
    &tcp_soreceive_stream, 0, "Using soreceive_stream for TCP sockets");

VNET_DEFINE(uma_zone_t, sack_hole_zone);
#define	V_sack_hole_zone		VNET(sack_hole_zone)

VNET_DEFINE(struct hhook_head *, tcp_hhh[HHOOK_TCP_LAST+1]);
static struct inpcb *tcp_notify(struct inpcb *, int);
static struct inpcb *tcp_mtudisc_notify(struct inpcb *, int);
static void tcp_mtudisc(struct inpcb *, int);
static char *	tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th,
		    void *ip4hdr, const void *ip6hdr);

static struct tcp_function_block tcp_def_funcblk = {
	"default",
	tcp_output,
	tcp_do_segment,
	tcp_default_ctloutput,
};

int t_functions_inited = 0;
struct tcp_funchead t_functions;
static struct tcp_function_block *tcp_func_set_ptr = &tcp_def_funcblk;
static void
init_tcp_functions(void)
{
	if (t_functions_inited == 0) {
		TAILQ_INIT(&t_functions);
		rw_init_flags(&tcp_function_lock, "tcp_func_lock", 0);
		t_functions_inited = 1;
	}
}
static struct tcp_function_block *
find_tcp_functions_locked(struct tcp_function_set *fs)
{
	struct tcp_function *f;
	struct tcp_function_block *blk = NULL;

	TAILQ_FOREACH(f, &t_functions, tf_next) {
		if (strcmp(f->tf_name, fs->function_set_name) == 0) {
			blk = f->tf_fb;
			break;
		}
	}
	return (blk);
}
static struct tcp_function_block *
find_tcp_fb_locked(struct tcp_function_block *blk, struct tcp_function **s)
{
	struct tcp_function_block *rblk = NULL;
	struct tcp_function *f;

	TAILQ_FOREACH(f, &t_functions, tf_next) {
		if (f->tf_fb == blk) {
			rblk = blk;
			if (s)
				*s = f;
			break;
		}
	}
	return (rblk);
}
struct tcp_function_block *
find_and_ref_tcp_functions(struct tcp_function_set *fs)
{
	struct tcp_function_block *blk;

	rw_rlock(&tcp_function_lock);
	blk = find_tcp_functions_locked(fs);
	if (blk)
		refcount_acquire(&blk->tfb_refcnt);
	rw_runlock(&tcp_function_lock);
	return (blk);
}
struct tcp_function_block *
find_and_ref_tcp_fb(struct tcp_function_block *blk)
{
	struct tcp_function_block *rblk;

	rw_rlock(&tcp_function_lock);
	rblk = find_tcp_fb_locked(blk, NULL);
	if (rblk)
		refcount_acquire(&rblk->tfb_refcnt);
	rw_runlock(&tcp_function_lock);
	return (rblk);
}
static int
sysctl_net_inet_default_tcp_functions(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct tcp_function_set fs;
	struct tcp_function_block *blk;

	memset(&fs, 0, sizeof(fs));
	rw_rlock(&tcp_function_lock);
	blk = find_tcp_fb_locked(tcp_func_set_ptr, NULL);
	if (blk) {
		/* Found him */
		strcpy(fs.function_set_name, blk->tfb_tcp_block_name);
		fs.pcbcnt = blk->tfb_refcnt;
	}
	rw_runlock(&tcp_function_lock);
	error = sysctl_handle_string(oidp, fs.function_set_name,
	    sizeof(fs.function_set_name), req);

	/* Check for error or no change */
	if (error != 0 || req->newptr == NULL)
		return (error);

	rw_wlock(&tcp_function_lock);
	blk = find_tcp_functions_locked(&fs);
	if ((blk == NULL) ||
	    (blk->tfb_flags & TCP_FUNC_BEING_REMOVED)) {
		error = ENOENT;
		goto done;
	}
	tcp_func_set_ptr = blk;
done:
	rw_wunlock(&tcp_function_lock);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_default,
    CTLTYPE_STRING | CTLFLAG_RW,
    NULL, 0, sysctl_net_inet_default_tcp_functions, "A",
    "Set/get the default TCP functions");
static int
sysctl_net_inet_list_available(SYSCTL_HANDLER_ARGS)
{
	int error, cnt, linesz;
	struct tcp_function *f;
	char *buffer, *cp;
	size_t bufsz, outsz;
	bool alias;

	cnt = 0;
	rw_rlock(&tcp_function_lock);
	TAILQ_FOREACH(f, &t_functions, tf_next) {
		cnt++;
	}
	rw_runlock(&tcp_function_lock);

	bufsz = (cnt + 2) * ((TCP_FUNCTION_NAME_LEN_MAX * 2) + 13) + 1;
	buffer = malloc(bufsz, M_TEMP, M_WAITOK);

	error = 0;
	cp = buffer;

	linesz = snprintf(cp, bufsz, "\n%-32s%c %-32s %s\n", "Stack", 'D',
	    "Alias", "PCB count");
	cp += linesz;
	bufsz -= linesz;
	outsz = linesz;

	rw_rlock(&tcp_function_lock);
	TAILQ_FOREACH(f, &t_functions, tf_next) {
		alias = (f->tf_name != f->tf_fb->tfb_tcp_block_name);
		linesz = snprintf(cp, bufsz, "%-32s%c %-32s %u\n",
		    f->tf_fb->tfb_tcp_block_name,
		    (f->tf_fb == tcp_func_set_ptr) ? '*' : ' ',
		    alias ? f->tf_name : "-",
		    f->tf_fb->tfb_refcnt);
		if (linesz >= bufsz) {
			error = EOVERFLOW;
			break;
		}
		cp += linesz;
		bufsz -= linesz;
		outsz += linesz;
	}
	rw_runlock(&tcp_function_lock);
	if (error == 0)
		error = sysctl_handle_string(oidp, buffer, outsz + 1, req);
	free(buffer, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_available,
    CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_net_inet_list_available, "A",
    "list available TCP Function sets");
/*
 * Exports one (struct tcp_function_id) for each non-alias.
 */
static int
sysctl_net_inet_list_func_ids(SYSCTL_HANDLER_ARGS)
{
	int cnt, error;
	struct tcp_function *f;
	struct tcp_function_id tfi;

	/*
	 * We don't allow writes.
	 */
	if (req->newptr != NULL)
		return (EINVAL);

	/*
	 * Wire the old buffer so we can directly copy the functions to
	 * user space without dropping the lock.
	 */
	if (req->oldptr != NULL) {
		error = sysctl_wire_old_buffer(req, 0);
		if (error)
			return (error);
	}

	/*
	 * Walk the list, comparing the name of the function entry and
	 * function block to determine which is an alias.
	 * If exporting the list, copy out matching entries.  Otherwise,
	 * just record the total length.
	 */
	cnt = 0;
	error = 0;
	rw_rlock(&tcp_function_lock);
	TAILQ_FOREACH(f, &t_functions, tf_next) {
		if (strncmp(f->tf_name, f->tf_fb->tfb_tcp_block_name,
		    TCP_FUNCTION_NAME_LEN_MAX))
			continue;
		if (req->oldptr != NULL) {
			tfi.tfi_id = f->tf_fb->tfb_id;
			(void)strncpy(tfi.tfi_name, f->tf_name,
			    TCP_FUNCTION_NAME_LEN_MAX);
			tfi.tfi_name[TCP_FUNCTION_NAME_LEN_MAX - 1] = '\0';
			error = SYSCTL_OUT(req, &tfi, sizeof(tfi));
			/*
			 * Don't stop on error, as that is the
			 * mechanism we use to accumulate length
			 * information if the buffer was too short.
			 */
		} else
			cnt++;
	}
	rw_runlock(&tcp_function_lock);
	if (req->oldptr == NULL)
		error = SYSCTL_OUT(req, NULL,
		    (cnt + 1) * sizeof(struct tcp_function_id));

	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, function_ids,
    CTLTYPE_OPAQUE | CTLFLAG_SKIP | CTLFLAG_RD | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_net_inet_list_func_ids, "S,tcp_function_id",
    "List TCP function block name-to-ID mappings");
/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#define	TCBHASHSIZE	0
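
/*
 * For example (illustrative), a fixed table size can be requested at
 * boot from loader.conf:
 *
 *	net.inet.tcp.tcbhashsize="16384"
 *
 * With the default of 0, tcp_init() derives the size from maxsockets
 * instead.
 */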
/*
 * Callouts should be moved into struct tcp directly.  They are currently
 * separate because the tcpcb structure is exported to userland for sysctl
 * parsing purposes, and the userland consumers do not know about callouts.
 */
struct tcpcb_mem {
	struct	tcpcb		tcb;
	struct	tcp_timer	tt;
	struct	cc_var		ccv;
#ifdef TCP_HHOOK
	struct	osd		osd;
#endif
};

static VNET_DEFINE(uma_zone_t, tcpcb_zone);
#define	V_tcpcb_zone			VNET(tcpcb_zone)

MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers");
MALLOC_DEFINE(M_TCPFUNCTIONS, "tcpfunc", "TCP function set memory");

static struct mtx isn_mtx;

#define	ISN_LOCK_INIT()	mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF)
#define	ISN_LOCK()	mtx_lock(&isn_mtx)
#define	ISN_UNLOCK()	mtx_unlock(&isn_mtx)
/*
 * TCP initialization.
 */
static void
tcp_zone_change(void *tag)
{

	uma_zone_set_max(V_tcbinfo.ipi_zone, maxsockets);
	uma_zone_set_max(V_tcpcb_zone, maxsockets);
	tcp_tw_zone_change();
}

static int
tcp_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "tcpinp");
	return (0);
}

/*
 * Take a value and get the next power of 2 that doesn't overflow.
 * Used to size the tcp_inpcb hash buckets.
 */
static int
maketcp_hashsize(int size)
{
	int hashsize;

	/*
	 * auto tune.
	 * get the next power of 2 higher than maxsockets.
	 */
	hashsize = 1 << fls(size);
	/* catch overflow, and just go one power of 2 smaller */
	if (hashsize < size) {
		hashsize = 1 << (fls(size) - 1);
	}
	return (hashsize);
}
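
/*
 * Worked example: fls(1000) == 10, so maketcp_hashsize(1000) returns
 * 1 << 10 == 1024.  If the shift overflows and produces a value smaller
 * than the input, the result falls back one power of 2, i.e.
 * 1 << (fls(size) - 1), the largest power of 2 that still fits.
 */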
static volatile int next_tcp_stack_id = 1;

/*
 * Register a TCP function block with the name provided in the names
 * array.  (Note that this function does NOT automatically register
 * blk->tfb_tcp_block_name as a stack name.  Therefore, you should
 * explicitly include blk->tfb_tcp_block_name in the list of names if
 * you wish to register the stack with that name.)
 *
 * Either all name registrations will succeed or all will fail.  If
 * a name registration fails, the function will update the num_names
 * argument to point to the array index of the name that encountered
 * the error.
 *
 * Returns 0 on success, or an error code on failure.
 */
int
register_tcp_functions_as_names(struct tcp_function_block *blk, int wait,
    const char *names[], int *num_names)
{
	struct tcp_function *n;
	struct tcp_function_set fs;
	int error, i;

	KASSERT(names != NULL && *num_names > 0,
	    ("%s: Called with 0-length name list", __func__));
	KASSERT(names != NULL, ("%s: Called with NULL name list", __func__));

	if (t_functions_inited == 0) {
		init_tcp_functions();
	}
	if ((blk->tfb_tcp_output == NULL) ||
	    (blk->tfb_tcp_do_segment == NULL) ||
	    (blk->tfb_tcp_ctloutput == NULL) ||
	    (strlen(blk->tfb_tcp_block_name) == 0)) {
		/*
		 * These functions are required and you
		 * need a name.
		 */
		*num_names = 0;
		return (EINVAL);
	}
	if (blk->tfb_tcp_timer_stop_all ||
	    blk->tfb_tcp_timer_activate ||
	    blk->tfb_tcp_timer_active ||
	    blk->tfb_tcp_timer_stop) {
		/*
		 * If you define one timer function you
		 * must have them all.
		 */
		if ((blk->tfb_tcp_timer_stop_all == NULL) ||
		    (blk->tfb_tcp_timer_activate == NULL) ||
		    (blk->tfb_tcp_timer_active == NULL) ||
		    (blk->tfb_tcp_timer_stop == NULL)) {
			*num_names = 0;
			return (EINVAL);
		}
	}

	refcount_init(&blk->tfb_refcnt, 0);
	blk->tfb_flags = 0;
	blk->tfb_id = atomic_fetchadd_int(&next_tcp_stack_id, 1);
	for (i = 0; i < *num_names; i++) {
		n = malloc(sizeof(struct tcp_function), M_TCPFUNCTIONS, wait);
		if (n == NULL) {
			error = ENOMEM;
			goto cleanup;
		}
		n->tf_fb = blk;

		(void)strncpy(fs.function_set_name, names[i],
		    TCP_FUNCTION_NAME_LEN_MAX);
		fs.function_set_name[TCP_FUNCTION_NAME_LEN_MAX - 1] = '\0';
		rw_wlock(&tcp_function_lock);
		if (find_tcp_functions_locked(&fs) != NULL) {
			/* Duplicate name space not allowed */
			rw_wunlock(&tcp_function_lock);
			free(n, M_TCPFUNCTIONS);
			error = EALREADY;
			goto cleanup;
		}
		(void)strncpy(n->tf_name, names[i], TCP_FUNCTION_NAME_LEN_MAX);
		n->tf_name[TCP_FUNCTION_NAME_LEN_MAX - 1] = '\0';
		TAILQ_INSERT_TAIL(&t_functions, n, tf_next);
		rw_wunlock(&tcp_function_lock);
	}
	return (0);

cleanup:
	/*
	 * Deregister the names we just added.  Because registration failed
	 * for names[i], we don't need to deregister that name.
	 */
	*num_names = i;
	rw_wlock(&tcp_function_lock);
	while (--i >= 0) {
		TAILQ_FOREACH(n, &t_functions, tf_next) {
			if (!strncmp(n->tf_name, names[i],
			    TCP_FUNCTION_NAME_LEN_MAX)) {
				TAILQ_REMOVE(&t_functions, n, tf_next);
				free(n, M_TCPFUNCTIONS);
				break;
			}
		}
	}
	rw_wunlock(&tcp_function_lock);
	return (error);
}
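
/*
 * Usage sketch (illustrative, not from this file): a loadable TCP stack
 * module would typically register itself from its module event handler,
 * e.g.
 *
 *	static const char *names[] = { "example", "example_alias" };
 *	int num_names = nitems(names);
 *	error = register_tcp_functions_as_names(&example_funcblk,
 *	    M_WAITOK, names, &num_names);
 *
 * where "example_funcblk" is a hypothetical tcp_function_block that
 * provides at least tfb_tcp_output, tfb_tcp_do_segment, and
 * tfb_tcp_ctloutput, as enforced above.  On failure, num_names is
 * updated to the index of the name that failed to register.
 */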
/*
 * Register a TCP function block using the name provided in the name
 * argument.
 *
 * Returns 0 on success, or an error code on failure.
 */
int
register_tcp_functions_as_name(struct tcp_function_block *blk, const char *name,
    int wait)
{
	const char *name_list[1];
	int num_names, rv;

	num_names = 1;
	if (name != NULL)
		name_list[0] = name;
	else
		name_list[0] = blk->tfb_tcp_block_name;
	rv = register_tcp_functions_as_names(blk, wait, name_list, &num_names);
	return (rv);
}

/*
 * Register a TCP function block using the name defined in
 * blk->tfb_tcp_block_name.
 *
 * Returns 0 on success, or an error code on failure.
 */
int
register_tcp_functions(struct tcp_function_block *blk, int wait)
{

	return (register_tcp_functions_as_name(blk, NULL, wait));
}
int
deregister_tcp_functions(struct tcp_function_block *blk)
{
	struct tcp_function *f;
	int error = EINVAL;

	if (strcmp(blk->tfb_tcp_block_name, "default") == 0) {
		/* You can't un-register the default */
		return (EPERM);
	}
	rw_wlock(&tcp_function_lock);
	if (blk == tcp_func_set_ptr) {
		/* You can't free the current default */
		rw_wunlock(&tcp_function_lock);
		return (EBUSY);
	}
	if (blk->tfb_refcnt) {
		/* Still tcb attached, mark it. */
		blk->tfb_flags |= TCP_FUNC_BEING_REMOVED;
		rw_wunlock(&tcp_function_lock);
		return (EBUSY);
	}
	while (find_tcp_fb_locked(blk, &f) != NULL) {
		/* Found */
		TAILQ_REMOVE(&t_functions, f, tf_next);
		free(f, M_TCPFUNCTIONS);
		error = 0;
	}
	rw_wunlock(&tcp_function_lock);
	return (error);
}
void
tcp_init(void)
{
	const char *tcbhash_tuneable;
	int hashsize;

	tcbhash_tuneable = "net.inet.tcp.tcbhashsize";

#ifdef TCP_HHOOK
	if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN,
	    &V_tcp_hhh[HHOOK_TCP_EST_IN], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
		printf("%s: WARNING: unable to register helper hook\n", __func__);
	if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT,
	    &V_tcp_hhh[HHOOK_TCP_EST_OUT], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
		printf("%s: WARNING: unable to register helper hook\n", __func__);
#endif
	hashsize = TCBHASHSIZE;
	TUNABLE_INT_FETCH(tcbhash_tuneable, &hashsize);
	if (hashsize == 0) {
		/*
		 * Auto tune the hash size based on maxsockets.
		 * A perfect hash would have a 1:1 mapping
		 * (hashsize = maxsockets) however it's been
		 * suggested that O(2) average is better.
		 */
		hashsize = maketcp_hashsize(maxsockets / 4);
		/*
		 * Our historical default is 512,
		 * do not autotune lower than this.
		 */
		if (hashsize < 512)
			hashsize = 512;
		if (bootverbose && IS_DEFAULT_VNET(curvnet))
			printf("%s: %s auto tuned to %d\n", __func__,
			    tcbhash_tuneable, hashsize);
	}
	/*
	 * We require a hashsize to be a power of two.
	 * Previously if it was not a power of two we would just reset it
	 * back to 512, which could be a nasty surprise if you did not notice
	 * the error message.
	 * Instead what we do is clip it to the closest power of two lower
	 * than the specified hash value.
	 */
	if (!powerof2(hashsize)) {
		int oldhashsize = hashsize;

		hashsize = maketcp_hashsize(hashsize);
		/* prevent absurdly low value */
		if (hashsize < 16)
			hashsize = 512;
		printf("%s: WARNING: TCB hash size not a power of 2, "
		    "clipped from %d to %d.\n", __func__, oldhashsize,
		    hashsize);
	}
	in_pcbinfo_init(&V_tcbinfo, "tcp", &V_tcb, hashsize, hashsize,
	    "tcp_inpcb", tcp_inpcb_init, IPI_HASHFIELDS_4TUPLE);

	/*
	 * These have to be type stable for the benefit of the timers.
	 */
	V_tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(V_tcpcb_zone, maxsockets);
	uma_zone_set_warning(V_tcpcb_zone, "kern.ipc.maxsockets limit reached");

	tcp_tw_init();
	syncache_init();
	tcp_hc_init();

	TUNABLE_INT_FETCH("net.inet.tcp.sack.enable", &V_tcp_do_sack);
	V_sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	tcp_fastopen_init();

	/* Skip initialization of globals for non-default instances. */
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	tcp_reass_global_init();

	/* XXX virtualize those below? */
	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	if (tcp_rexmit_min < 1)
		tcp_rexmit_min = 1;
	tcp_persmin = TCPTV_PERSMIN;
	tcp_persmax = TCPTV_PERSMAX;
	tcp_rexmit_slop = TCPTV_CPU_VAR;
	tcp_finwait2_timeout = TCPTV_FINWAIT2_TIMEOUT;
	tcp_tcbhashsize = hashsize;

	/* Setup the tcp function block list */
	init_tcp_functions();
	register_tcp_functions(&tcp_def_funcblk, M_WAITOK);
#ifdef TCP_BLACKBOX
	/* Initialize the TCP logging data. */
	tcp_log_init();
#endif

	if (tcp_soreceive_stream) {
#ifdef INET
		tcp_usrreqs.pru_soreceive = soreceive_stream;
#endif
#ifdef INET6
		tcp6_usrreqs.pru_soreceive = soreceive_stream;
#endif /* INET6 */
	}

#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	ISN_LOCK_INIT();
	EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
	    SHUTDOWN_PRI_DEFAULT);
	EVENTHANDLER_REGISTER(maxsockets_change, tcp_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}
#ifdef VIMAGE
static void
tcp_destroy(void *unused __unused)
{
	int n;
#ifdef TCP_HHOOK
	int error;
#endif

	/*
	 * All our processes are gone, all our sockets should be cleaned
	 * up, which means we should be past the tcp_discardcb() calls.
	 * Sleep to let all tcpcb timers really disappear and cleanup.
	 */
	for (;;) {
		INP_LIST_RLOCK(&V_tcbinfo);
		n = V_tcbinfo.ipi_count;
		INP_LIST_RUNLOCK(&V_tcbinfo);
		if (n == 0)
			break;
		pause("tcpdes", hz / 10);
	}
	tcp_hc_destroy();
	syncache_destroy();
	tcp_tw_destroy();
	in_pcbinfo_destroy(&V_tcbinfo);
	/* tcp_discardcb() clears the sack_holes up. */
	uma_zdestroy(V_sack_hole_zone);
	uma_zdestroy(V_tcpcb_zone);

	/*
	 * Cannot free the zone until all tcpcbs are released as we attach
	 * the allocations to them.
	 */
	tcp_fastopen_destroy();

#ifdef TCP_HHOOK
	error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_IN]);
	if (error != 0) {
		printf("%s: WARNING: unable to deregister helper hook "
		    "type=%d, id=%d: error %d returned\n", __func__,
		    HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN, error);
	}
	error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_OUT]);
	if (error != 0) {
		printf("%s: WARNING: unable to deregister helper hook "
		    "type=%d, id=%d: error %d returned\n", __func__,
		    HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT, error);
	}
#endif
}

VNET_SYSUNINIT(tcp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, tcp_destroy, NULL);
#endif /* VIMAGE */
/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcpip_fillheaders(struct inpcb *inp, void *ip_ptr, void *tcp_ptr)
{
	struct tcphdr *th = (struct tcphdr *)tcp_ptr;

	INP_WLOCK_ASSERT(inp);

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
		    (inp->inp_flow & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
		    (IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons(sizeof(struct tcphdr));
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
	}
#endif /* INET6 */
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		struct ip *ip;

		ip = (struct ip *)ip_ptr;
		ip->ip_v = IPVERSION;
		ip->ip_hl = 5;
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = inp->inp_ip_ttl;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
	}
#endif /* INET */
	th->th_sport = inp->inp_lport;
	th->th_dport = inp->inp_fport;
	th->th_seq = 0;
	th->th_ack = 0;
	th->th_x2 = 0;
	th->th_off = 5;
	th->th_flags = 0;
	th->th_win = 0;
	th->th_urp = 0;
	th->th_sum = 0;		/* in_pseudo() is called later for ipv4 */
}
/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcpip_maketemplate(struct inpcb *inp)
{
	struct tcptemp *t;

	t = malloc(sizeof(*t), M_TEMP, M_NOWAIT);
	if (t == NULL)
		return (NULL);
	tcpip_fillheaders(inp, (void *)&t->tt_ipgen, (void *)&t->tt_t);
	return (t);
}
/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at th and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment th,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then th must point to *inside* the mbuf.
 */
void
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
    tcp_seq ack, tcp_seq seq, int flags)
{
	struct tcpopt to;
	struct inpcb *inp;
	struct ip *ip;
	struct mbuf *optm;
	struct tcphdr *nth;
	u_char *optp;
#ifdef INET6
	struct ip6_hdr *ip6;
	int isipv6;
#endif /* INET6 */
	int optlen, tlen, win;
	bool incl_opts;

	KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));

#ifdef INET6
	isipv6 = ((struct ip *)ipgen)->ip_v == (IPV6_VERSION >> 4);
	ip6 = ipgen;
#endif /* INET6 */
	ip = ipgen;

	if (tp != NULL) {
		inp = tp->t_inpcb;
		KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
		INP_WLOCK_ASSERT(inp);
	} else
		inp = NULL;

	incl_opts = false;
	win = 0;
	if (tp != NULL) {
		if (!(flags & TH_RST)) {
			win = sbspace(&inp->inp_socket->so_rcv);
			if (win > TCP_MAXWIN << tp->rcv_scale)
				win = TCP_MAXWIN << tp->rcv_scale;
		}
		if ((tp->t_flags & TF_NOOPT) == 0)
			incl_opts = true;
	}
	if (m == NULL) {
		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (m == NULL)
			return;
		m->m_data += max_linkhdr;
#ifdef INET6
		if (isipv6) {
			bcopy((caddr_t)ip6, mtod(m, caddr_t),
			    sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else if (!M_WRITABLE(m)) {
		struct mbuf *n;

		/* Can't reuse 'm', allocate a new mbuf. */
		n = m_gethdr(M_NOWAIT, MT_DATA);
		if (n == NULL) {
			m_freem(m);
			return;
		}

		if (!m_dup_pkthdr(n, m, M_NOWAIT)) {
			m_freem(m);
			m_freem(n);
			return;
		}

		n->m_data += max_linkhdr;
		/* m_len is set later */
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
#ifdef INET6
		if (isipv6) {
			bcopy((caddr_t)ip6, mtod(n, caddr_t),
			    sizeof(struct ip6_hdr));
			ip6 = mtod(n, struct ip6_hdr *);
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			bcopy((caddr_t)ip, mtod(n, caddr_t), sizeof(struct ip));
			ip = mtod(n, struct ip *);
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
		xchg(nth->th_dport, nth->th_sport, uint16_t);
		th = nth;
		m_freem(m);
		m = n;
	} else {
		/*
		 * XXX MRT We inherit the FIB, which is lucky.
		 */
		m_freem(m->m_next);
		m->m_next = NULL;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
#ifdef INET6
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, uint16_t);
#undef xchg
	}
	tlen = 0;
#ifdef INET6
	if (isipv6)
		tlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
		tlen = sizeof (struct tcpiphdr);
#endif
#ifdef INVARIANTS
	m->m_len = 0;
	KASSERT(M_TRAILINGSPACE(m) >= tlen,
	    ("Not enough trailing space for message (m=%p, need=%d, have=%ld)",
	    m, tlen, (long)M_TRAILINGSPACE(m)));
#endif
	m->m_len = tlen;
	to.to_flags = 0;
	if (incl_opts) {
		/* Make sure we have room. */
		if (M_TRAILINGSPACE(m) < TCP_MAXOLEN) {
			m->m_next = m_get(M_NOWAIT, MT_DATA);
			if (m->m_next) {
				optp = mtod(m->m_next, u_char *);
				optm = m->m_next;
			} else
				incl_opts = false;
		} else {
			optp = (u_char *) (nth + 1);
			optm = m;
		}
	}
	if (incl_opts) {
		/* Timestamps. */
		if (tp->t_flags & TF_RCVD_TSTMP) {
			to.to_tsval = tcp_ts_getticks() + tp->ts_offset;
			to.to_tsecr = tp->ts_recent;
			to.to_flags |= TOF_TS;
		}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		/* TCP-MD5 (RFC2385). */
		if (tp->t_flags & TF_SIGNATURE)
			to.to_flags |= TOF_SIGNATURE;
#endif
		/* Add the options. */
		tlen += optlen = tcp_addoptions(&to, optp);

		/* Update m_len in the correct mbuf. */
		optm->m_len += optlen;
	} else
		optlen = 0;
#ifdef INET6
	if (isipv6) {
		ip6->ip6_flow = 0;
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons(tlen - sizeof(*ip6));
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		ip->ip_len = htons(tlen);
		ip->ip_ttl = V_ip_defttl;
		if (V_path_mtu_discovery)
			ip->ip_off |= htons(IP_DF);
	}
#endif
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
#ifdef MAC
	if (inp != NULL) {
		/*
		 * Packet is associated with a socket, so allow the
		 * label of the response to reflect the socket label.
		 */
		INP_WLOCK_ASSERT(inp);
		mac_inpcb_create_mbuf(inp, m);
	} else {
		/*
		 * Packet is not associated with a socket, so possibly
		 * update the label in place.
		 */
		mac_netinet_tcp_reply(m);
	}
#endif
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
	nth->th_flags = flags;
	if (tp != NULL)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;

#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if (to.to_flags & TOF_SIGNATURE) {
		if (!TCPMD5_ENABLED() ||
		    TCPMD5_OUTPUT(m, nth, to.to_signature) != 0) {
			m_freem(m);
			return;
		}
	}
#endif

	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
#ifdef INET6
	if (isipv6) {
		m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
		nth->th_sum = in6_cksum_pseudo(ip6,
		    tlen - sizeof(struct ip6_hdr), IPPROTO_TCP, 0);
		ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb :
		    NULL, NULL);
	}
#endif /* INET6 */
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		m->m_pkthdr.csum_flags = CSUM_TCP;
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
	}
#endif /* INET */
#ifdef TCPDEBUG
	if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
	TCP_PROBE3(debug__output, tp, th, m);
	if (flags & TH_RST)
		TCP_PROBE5(accept__refused, NULL, NULL, m, tp, nth);

#ifdef INET6
	if (isipv6) {
		TCP_PROBE5(send, NULL, tp, ip6, tp, nth);
		(void)ip6_output(m, NULL, NULL, 0, NULL, NULL, inp);
	}
#endif /* INET6 */
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		TCP_PROBE5(send, NULL, tp, ip, tp, nth);
		(void)ip_output(m, NULL, NULL, 0, NULL, inp);
	}
#endif
}
/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(struct inpcb *inp)
{
	struct tcpcb_mem *tm;
	struct tcpcb *tp;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	tm = uma_zalloc(V_tcpcb_zone, M_NOWAIT | M_ZERO);
	if (tm == NULL)
		return (NULL);
	tp = &tm->tcb;

	/* Initialise cc_var struct for this tcpcb. */
	tp->ccv = &tm->ccv;
	tp->ccv->type = IPPROTO_TCP;
	tp->ccv->ccvc.tcp = tp;
	rw_rlock(&tcp_function_lock);
	tp->t_fb = tcp_func_set_ptr;
	refcount_acquire(&tp->t_fb->tfb_refcnt);
	rw_runlock(&tcp_function_lock);
	/*
	 * Use the current system default CC algorithm.
	 */
	CC_LIST_RLOCK();
	KASSERT(!STAILQ_EMPTY(&cc_list), ("cc_list is empty!"));
	CC_ALGO(tp) = CC_DEFAULT();
	CC_LIST_RUNLOCK();

	if (CC_ALGO(tp)->cb_init != NULL)
		if (CC_ALGO(tp)->cb_init(tp->ccv) > 0) {
			if (tp->t_fb->tfb_tcp_fb_fini)
				(*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
			refcount_release(&tp->t_fb->tfb_refcnt);
			uma_zfree(V_tcpcb_zone, tm);
			return (NULL);
		}

#ifdef TCP_HHOOK
	tp->osd = &tm->osd;
	if (khelp_init_osd(HELPER_CLASS_TCP, tp->osd)) {
		if (tp->t_fb->tfb_tcp_fb_fini)
			(*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
		refcount_release(&tp->t_fb->tfb_refcnt);
		uma_zfree(V_tcpcb_zone, tm);
		return (NULL);
	}
#endif

#ifdef VIMAGE
	tp->t_vnet = inp->inp_vnet;
#endif
	tp->t_timers = &tm->tt;
	/*	LIST_INIT(&tp->t_segq); */	/* XXX covered by M_ZERO */
	tp->t_maxseg =
#ifdef INET6
		isipv6 ? V_tcp_v6mssdflt :
#endif /* INET6 */
		V_tcp_mssdflt;

	/* Set up our timeouts. */
	callout_init(&tp->t_timers->tt_rexmt, 1);
	callout_init(&tp->t_timers->tt_persist, 1);
	callout_init(&tp->t_timers->tt_keep, 1);
	callout_init(&tp->t_timers->tt_2msl, 1);
	callout_init(&tp->t_timers->tt_delack, 1);

	if (V_tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
	if (V_tcp_do_sack)
		tp->t_flags |= TF_SACK_PERMIT;
	TAILQ_INIT(&tp->snd_holes);
	/*
	 * The tcpcb will hold a reference on its inpcb until tcp_discardcb()
	 * is called.
	 */
	in_pcbref(inp);	/* Reference for tcpcb */
	tp->t_inpcb = inp;

	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = V_ip_defttl;
	inp->inp_ppcb = tp;
#ifdef TCPPCAP
	/*
	 * Init the TCP PCAP queues.
	 */
	tcp_pcap_tcpcb_init(tp);
#endif
#ifdef TCP_BLACKBOX
	/* Initialize the per-TCPCB log data. */
	tcp_log_tcpcbinit(tp);
#endif
	if (tp->t_fb->tfb_tcp_fb_init) {
		(*tp->t_fb->tfb_tcp_fb_init)(tp);
	}
	return (tp);		/* XXX */
}
/*
 * Switch the congestion control algorithm back to NewReno for any active
 * control blocks using an algorithm which is about to go away.
 * This ensures the CC framework can allow the unload to proceed without leaving
 * any dangling pointers which would trigger a panic.
 * Returning non-zero would inform the CC framework that something went wrong
 * and it would be unsafe to allow the unload to proceed.  However, there is no
 * way for this to occur with this implementation so we always return zero.
 */
int
tcp_ccalgounload(struct cc_algo *unload_algo)
{
	struct cc_algo *tmpalgo;
	struct inpcb *inp;
	struct tcpcb *tp;
	VNET_ITERATOR_DECL(vnet_iter);

	/*
	 * Check all active control blocks across all network stacks and change
	 * any that are using "unload_algo" back to NewReno.  If "unload_algo"
	 * requires cleanup code to be run, call it.
	 */
	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		INP_INFO_WLOCK(&V_tcbinfo);
		/*
		 * New connections already part way through being initialised
		 * with the CC algo we're removing will not race with this code
		 * because the INP_INFO_WLOCK is held during initialisation.  We
		 * therefore don't enter the loop below until the connection
		 * list has stabilised.
		 */
		LIST_FOREACH(inp, &V_tcb, inp_list) {
			INP_WLOCK(inp);
			/* Important to skip tcptw structs. */
			if (!(inp->inp_flags & INP_TIMEWAIT) &&
			    (tp = intotcpcb(inp)) != NULL) {
				/*
				 * By holding INP_WLOCK here, we are assured
				 * that the connection is not currently
				 * executing inside the CC module's functions
				 * i.e. it is safe to make the switch back to
				 * NewReno.
				 */
				if (CC_ALGO(tp) == unload_algo) {
					tmpalgo = CC_ALGO(tp);
					/* NewReno does not require any init. */
					CC_ALGO(tp) = &newreno_cc_algo;
					if (tmpalgo->cb_destroy != NULL)
						tmpalgo->cb_destroy(tp->ccv);
				}
			}
			INP_WUNLOCK(inp);
		}
		INP_INFO_WUNLOCK(&V_tcbinfo);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();

	return (0);
}
/*
 * Drop a TCP connection, reporting the specified error.
 * If the connection is synchronized, then send a RST to the peer.
 */
struct tcpcb *
tcp_drop(struct tcpcb *tp, int errno)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	INP_INFO_LOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(tp->t_inpcb);

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tcp_state_change(tp, TCPS_CLOSED);
		(void) tp->t_fb->tfb_tcp_output(tp);
		TCPSTAT_INC(tcps_drops);
	} else
		TCPSTAT_INC(tcps_conndrops);
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}
void
tcp_discardcb(struct tcpcb *tp)
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */
	int released __unused;

	INP_WLOCK_ASSERT(inp);

	/*
	 * Make sure that all of our timers are stopped before we delete the
	 * PCB.
	 *
	 * If stopping a timer fails, we schedule a discard function in same
	 * callout, and the last discard function called will take care of
	 * deleting the tcpcb.
	 */
	tp->t_timers->tt_draincnt = 0;
	tcp_timer_stop(tp, TT_REXMT);
	tcp_timer_stop(tp, TT_PERSIST);
	tcp_timer_stop(tp, TT_KEEP);
	tcp_timer_stop(tp, TT_2MSL);
	tcp_timer_stop(tp, TT_DELACK);
	if (tp->t_fb->tfb_tcp_timer_stop_all) {
		/*
		 * Call the stop-all function of the methods,
		 * this function should call the tcp_timer_stop()
		 * method with each of the function specific timeouts.
		 * That stop will be called via the tfb_tcp_timer_stop()
		 * which should use the async drain function of the
		 * callout system (see tcp_var.h).
		 */
		tp->t_fb->tfb_tcp_timer_stop_all(tp);
	}

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 4 rtt samples.
	 * 4 samples is enough for the srtt filter to converge
	 * to within enough % of the correct value; fewer samples
	 * and we could save a bogus rtt.  The danger is not high
	 * as tcp quickly recovers from everything.
	 * XXX: Works very well but needs some more statistics!
	 */
	if (tp->t_rttupdated >= 4) {
		struct hc_metrics_lite metrics;
		uint32_t ssthresh;

		bzero(&metrics, sizeof(metrics));
		/*
		 * Update the ssthresh always when the conditions below
		 * are satisfied.  This gives us better new start value
		 * for the congestion avoidance for new connections.
		 * ssthresh is only set if packet loss occurred on a session.
		 *
		 * XXXRW: 'so' may be NULL here, and/or socket buffer may be
		 * being torn down.  Ideally this code would not use 'so'.
		 */
		ssthresh = tp->snd_ssthresh;
		if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
			if (ssthresh < 2)
				ssthresh = 2;
			ssthresh *= (tp->t_maxseg +
#ifdef INET6
			    (isipv6 ? sizeof (struct ip6_hdr) +
				sizeof (struct tcphdr) :
#endif
				sizeof (struct tcpiphdr)
#ifdef INET6
			    )
#endif
			    );
		} else
			ssthresh = 0;
		metrics.rmx_ssthresh = ssthresh;

		metrics.rmx_rtt = tp->t_srtt;
		metrics.rmx_rttvar = tp->t_rttvar;
		metrics.rmx_cwnd = tp->snd_cwnd;
		metrics.rmx_sendpipe = 0;
		metrics.rmx_recvpipe = 0;

		tcp_hc_update(&inp->inp_inc, &metrics);
	}

	/* free the reassembly queue, if any */
	tcp_reass_flush(tp);

#ifdef TCP_OFFLOAD
	/* Disconnect offload device, if any. */
	if (tp->t_flags & TF_TOE)
		tcp_offload_detach(tp);
#endif

	tcp_free_sackholes(tp);

#ifdef TCPPCAP
	/* Free the TCP PCAP queues. */
	tcp_pcap_drain(&(tp->t_inpkts));
	tcp_pcap_drain(&(tp->t_outpkts));
#endif

	/* Allow the CC algorithm to clean up after itself. */
	if (CC_ALGO(tp)->cb_destroy != NULL)
		CC_ALGO(tp)->cb_destroy(tp->ccv);

#ifdef TCP_HHOOK
	khelp_destroy_osd(tp->osd);
#endif

	CC_ALGO(tp) = NULL;
	inp->inp_ppcb = NULL;
	if (tp->t_timers->tt_draincnt == 0) {
		/* We own the last reference on tcpcb, let's free it. */
#ifdef TCP_BLACKBOX
		tcp_log_tcpcbfini(tp);
#endif
		TCPSTATES_DEC(tp->t_state);
		if (tp->t_fb->tfb_tcp_fb_fini)
			(*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
		refcount_release(&tp->t_fb->tfb_refcnt);
		tp->t_inpcb = NULL;
		uma_zfree(V_tcpcb_zone, tp);
		released = in_pcbrele_wlocked(inp);
		KASSERT(!released, ("%s: inp %p should not have been released "
		    "here", __func__, inp));
	}
}
void
tcp_timer_discard(void *ptp)
{
	struct inpcb *inp;
	struct tcpcb *tp;

	tp = (struct tcpcb *)ptp;
	CURVNET_SET(tp->t_vnet);
	INP_INFO_RLOCK(&V_tcbinfo);
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL",
	    __func__, tp));
	INP_WLOCK(inp);
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) != 0,
	    ("%s: tcpcb has to be stopped here", __func__));
	tp->t_timers->tt_draincnt--;
	if (tp->t_timers->tt_draincnt == 0) {
		/* We own the last reference on this tcpcb, let's free it. */
#ifdef TCP_BLACKBOX
		tcp_log_tcpcbfini(tp);
#endif
		TCPSTATES_DEC(tp->t_state);
		if (tp->t_fb->tfb_tcp_fb_fini)
			(*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
		refcount_release(&tp->t_fb->tfb_refcnt);
		tp->t_inpcb = NULL;
		uma_zfree(V_tcpcb_zone, tp);
		if (in_pcbrele_wlocked(inp)) {
			INP_INFO_RUNLOCK(&V_tcbinfo);
			CURVNET_RESTORE();
			return;
		}
	}
	INP_WUNLOCK(inp);
	INP_INFO_RUNLOCK(&V_tcbinfo);
	CURVNET_RESTORE();
}
/*
 * Attempt to close a TCP control block, marking it as dropped, and freeing
 * the socket if we hold the only reference.
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so;

	INP_INFO_LOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

#ifdef TCP_OFFLOAD
	if (tp->t_state == TCPS_LISTEN)
		tcp_offload_listen_stop(tp);
#endif
	/*
	 * This releases the TFO pending counter resource for TFO listen
	 * sockets as well as passively-created TFO sockets that transition
	 * from SYN_RECEIVED to CLOSED.
	 */
	if (tp->t_tfo_pending) {
		tcp_fastopen_decrement_counter(tp->t_tfo_pending);
		tp->t_tfo_pending = NULL;
	}
	in_pcbdrop(inp);
	TCPSTAT_INC(tcps_closed);
	if (tp->t_state != TCPS_CLOSED)
		tcp_state_change(tp, TCPS_CLOSED);
	KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL"));
	so = inp->inp_socket;
	soisdisconnected(so);
	if (inp->inp_flags & INP_SOCKREF) {
		KASSERT(so->so_state & SS_PROTOREF,
		    ("tcp_close: !SS_PROTOREF"));
		inp->inp_flags &= ~INP_SOCKREF;
		INP_WUNLOCK(inp);
		SOCK_LOCK(so);
		so->so_state &= ~SS_PROTOREF;
		sofree(so);
		return (NULL);
	}
	return (tp);
}
void
tcp_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	if (!do_tcpdrain)
		return;

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		struct inpcb *inpb;
		struct tcpcb *tcpb;

		/*
		 * Walk the tcpbs, if existing, and flush the reassembly queue,
		 * if there is one...
		 * XXX: The "Net/3" implementation doesn't imply that the TCP
		 * reassembly queue should be flushed, but in a situation
		 * where we're really low on mbufs, this is potentially
		 * useful.
		 */
		INP_INFO_WLOCK(&V_tcbinfo);
		LIST_FOREACH(inpb, V_tcbinfo.ipi_listhead, inp_list) {
			if (inpb->inp_flags & INP_TIMEWAIT)
				continue;
			INP_WLOCK(inpb);
			if ((tcpb = intotcpcb(inpb)) != NULL) {
				tcp_reass_flush(tcpb);
				tcp_clean_sackreport(tcpb);
#ifdef TCP_BLACKBOX
				tcp_log_drain(tcpb);
#endif
#ifdef TCPPCAP
				if (tcp_pcap_aggressive_free) {
					/* Free the TCP PCAP queues. */
					tcp_pcap_drain(&(tcpb->t_inpkts));
					tcp_pcap_drain(&(tcpb->t_outpkts));
				}
#endif
			}
			INP_WUNLOCK(inpb);
		}
		INP_INFO_WUNLOCK(&V_tcbinfo);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static struct inpcb *
tcp_notify(struct inpcb *inp, int error)
{
	struct tcpcb *tp;

	INP_INFO_LOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	if ((inp->inp_flags & INP_TIMEWAIT) ||
	    (inp->inp_flags & INP_DROPPED))
		return (inp);

	tp = intotcpcb(inp);
	KASSERT(tp != NULL, ("tcp_notify: tp == NULL"));

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		if (inp->inp_route.ro_rt) {
			RTFREE(inp->inp_route.ro_rt);
			inp->inp_route.ro_rt = (struct rtentry *)NULL;
		}
		return (inp);
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror) {
		tp = tcp_drop(tp, error);
		if (tp != NULL)
			return (inp);
		else
			return (NULL);
	} else {
		tp->t_softerror = error;
		return (inp);
	}
#if 0
	wakeup( &so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}
static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, m, n, pcb_count;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		n = V_tcbinfo.ipi_count +
		    counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
		n += imax(n / 8, 10);
		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xtcpcb);
		return (0);
	}

	if (req->newptr != NULL)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_LIST_RLOCK(&V_tcbinfo);
	gencnt = V_tcbinfo.ipi_gencnt;
	n = V_tcbinfo.ipi_count;
	INP_LIST_RUNLOCK(&V_tcbinfo);

	m = counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);

	error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
		+ (n + m) * sizeof(struct xtcpcb));
	if (error != 0)
		return (error);

	xig.xig_len = sizeof xig;
	xig.xig_count = n + m;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	error = syncache_pcblist(req, m, &pcb_count);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);

	INP_INFO_WLOCK(&V_tcbinfo);
	for (inp = LIST_FIRST(V_tcbinfo.ipi_listhead), i = 0;
	    inp != NULL && i < n; inp = LIST_NEXT(inp, inp_list)) {
		INP_WLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			/*
			 * XXX: This use of cr_cansee(), introduced with
			 * TCP state changes, is not quite right, but for
			 * now, better than nothing.
			 */
			if (inp->inp_flags & INP_TIMEWAIT) {
				if (intotw(inp) != NULL)
					error = cr_cansee(req->td->td_ucred,
					    intotw(inp)->tw_cred);
				else
					error = EINVAL;	/* Skip this inp. */
			} else
				error = cr_canseeinpcb(req->td->td_ucred, inp);
			if (error == 0) {
				in_pcbref(inp);
				inp_list[i++] = inp;
			}
		}
		INP_WUNLOCK(inp);
	}
	INP_INFO_WUNLOCK(&V_tcbinfo);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xtcpcb xt;

			tcp_inptoxtp(inp, &xt);
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		} else
			INP_RUNLOCK(inp);
	}
	INP_INFO_RLOCK(&V_tcbinfo);
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (!in_pcbrele_rlocked(inp))
			INP_RUNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&V_tcbinfo);

	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		INP_LIST_RLOCK(&V_tcbinfo);
		xig.xig_gen = V_tcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = V_tcbinfo.ipi_count + pcb_count;
		INP_LIST_RUNLOCK(&V_tcbinfo);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist,
    CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
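
/*
 * Consumer note (illustrative, not part of this file): userland tools
 * such as netstat(1) retrieve this list via
 * sysctlbyname("net.inet.tcp.pcblist", ...) and walk the returned
 * xinpgen header plus the following xtcpcb records.
 */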
#ifdef INET
static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int error;

	error = priv_check(req->td, PRIV_NETINET_GETCRED);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	inp = in_pcblookup(&V_tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
	    addrs[0].sin_addr, addrs[0].sin_port, INPLOOKUP_RLOCKPCB, NULL);
	if (inp != NULL) {
		if (inp->inp_socket == NULL)
			error = ENOENT;
		if (error == 0)
			error = cr_canseeinpcb(req->td->td_ucred, inp);
		if (error == 0)
			cru2x(inp->inp_cred, &xuc);
		INP_RUNLOCK(inp);
	} else
		error = ENOENT;
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp_getcred, "S,xucred", "Get the xucred of a TCP connection");
#endif /* INET */
#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error;
#ifdef INET
	int mapped = 0;
#endif

	error = priv_check(req->td, PRIV_NETINET_GETCRED);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	if ((error = sa6_embedscope(&addrs[0], V_ip6_use_defzone)) != 0 ||
	    (error = sa6_embedscope(&addrs[1], V_ip6_use_defzone)) != 0) {
		return (error);
	}
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
#ifdef INET
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = 1;
		else
#endif
			return (EINVAL);
	}

#ifdef INET
	if (mapped == 1)
		inp = in_pcblookup(&V_tcbinfo,
		    *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
		    addrs[1].sin6_port,
		    *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
		    addrs[0].sin6_port, INPLOOKUP_RLOCKPCB, NULL);
	else
#endif
		inp = in6_pcblookup(&V_tcbinfo,
		    &addrs[1].sin6_addr, addrs[1].sin6_port,
		    &addrs[0].sin6_addr, addrs[0].sin6_port,
		    INPLOOKUP_RLOCKPCB, NULL);
	if (inp != NULL) {
		if (inp->inp_socket == NULL)
			error = ENOENT;
		if (error == 0)
			error = cr_canseeinpcb(req->td->td_ucred, inp);
		if (error == 0)
			cru2x(inp->inp_cred, &xuc);
		INP_RUNLOCK(inp);
	} else
		error = ENOENT;
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection");
#endif /* INET6 */
#ifdef INET
void
tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct ip *ip = vip;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	struct icmp *icp;
	struct in_conninfo inc;
	tcp_seq icmp_tcp_seq;
	int mtu;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc_notify;
	else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
		cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
		cmd == PRC_TIMXCEED_INTRANS) && ip)
		notify = tcp_drop_syn_sent;

	/*
	 * Hostdead is ugly because it goes linearly through all PCBs.
	 * XXX: We never get this from ICMP, otherwise it makes an
	 * excellent DoS attack on machines with many connections.
	 */
	else if (cmd == PRC_HOSTDEAD)
		ip = NULL;
	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
		return;

	if (ip == NULL) {
		in_pcbnotifyall(&V_tcbinfo, faddr, inetctlerrmap[cmd], notify);
		return;
	}

	icp = (struct icmp *)((caddr_t)ip - offsetof(struct icmp, icmp_ip));
	th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
	INP_INFO_RLOCK(&V_tcbinfo);
	inp = in_pcblookup(&V_tcbinfo, faddr, th->th_dport, ip->ip_src,
	    th->th_sport, INPLOOKUP_WLOCKPCB, NULL);
	if (inp != NULL && PRC_IS_REDIRECT(cmd)) {
		/* signal EHOSTDOWN, as it flushes the cached route */
		inp = (*notify)(inp, EHOSTDOWN);
		goto out;
	}
	icmp_tcp_seq = th->th_seq;
	if (inp != NULL) {
		if (!(inp->inp_flags & INP_TIMEWAIT) &&
		    !(inp->inp_flags & INP_DROPPED) &&
		    !(inp->inp_socket == NULL)) {
			tp = intotcpcb(inp);
			if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) &&
			    SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) {
				if (cmd == PRC_MSGSIZE) {
					/*
					 * MTU discovery:
					 * If we got a needfrag set the MTU
					 * in the route to the suggested new
					 * value (if given) and then notify.
					 */
					mtu = ntohs(icp->icmp_nextmtu);
					/*
					 * If no alternative MTU was
					 * proposed, try the next smaller
					 * one.
					 */
					if (!mtu)
						mtu = ip_next_mtu(
						    ntohs(ip->ip_len), 1);
					if (mtu < V_tcp_minmss +
					    sizeof(struct tcpiphdr))
						mtu = V_tcp_minmss +
						    sizeof(struct tcpiphdr);
					/*
					 * Only process the offered MTU if it
					 * is smaller than the current one.
					 */
					if (mtu < tp->t_maxseg +
					    sizeof(struct tcpiphdr)) {
						bzero(&inc, sizeof(inc));
						inc.inc_faddr = faddr;
						inc.inc_fibnum =
						    inp->inp_inc.inc_fibnum;
						tcp_hc_updatemtu(&inc, mtu);
						tcp_mtudisc(inp, mtu);
					}
				} else
					inp = (*notify)(inp,
					    inetctlerrmap[cmd]);
			}
		}
	} else {
		bzero(&inc, sizeof(inc));
		inc.inc_fport = th->th_dport;
		inc.inc_lport = th->th_sport;
		inc.inc_faddr = faddr;
		inc.inc_laddr = ip->ip_src;
		syncache_unreach(&inc, icmp_tcp_seq);
	}
out:
	if (inp != NULL)
		INP_WUNLOCK(inp);
	INP_INFO_RUNLOCK(&V_tcbinfo);
}
#endif /* INET */
#ifdef INET6
void
tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
{
	struct in6_addr *dst;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct icmp6_hdr *icmp6;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	struct in_conninfo inc;
	struct tcp_ports {
		uint16_t th_sport;
		uint16_t th_dport;
	} t_ports;
	tcp_seq icmp_tcp_seq;
	unsigned int mtu;
	unsigned int off;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		icmp6 = ip6cp->ip6c_icmp6;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
		dst = ip6cp->ip6c_finaldst;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
		dst = NULL;
	}

	if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc_notify;
	else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
		cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
		cmd == PRC_TIMXCEED_INTRANS) && ip6 != NULL)
		notify = tcp_drop_syn_sent;

	/*
	 * Hostdead is ugly because it goes linearly through all PCBs.
	 * XXX: We never get this from ICMP, otherwise it makes an
	 * excellent DoS attack on machines with many connections.
	 */
	else if (cmd == PRC_HOSTDEAD)
		ip6 = NULL;
	else if ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0)
		return;

	if (ip6 == NULL) {
		in6_pcbnotify(&V_tcbinfo, sa, 0,
		    (const struct sockaddr *)sa6_src,
		    0, cmd, NULL, notify);
		return;
	}

	/* Check if we can safely get the ports from the tcp hdr */
	if (m == NULL ||
	    (m->m_pkthdr.len <
		(int32_t) (off + sizeof(struct tcp_ports)))) {
		return;
	}
	bzero(&t_ports, sizeof(struct tcp_ports));
	m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports);
	INP_INFO_RLOCK(&V_tcbinfo);
	inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_dst, t_ports.th_dport,
	    &ip6->ip6_src, t_ports.th_sport, INPLOOKUP_WLOCKPCB, NULL);
	if (inp != NULL && PRC_IS_REDIRECT(cmd)) {
		/* signal EHOSTDOWN, as it flushes the cached route */
		inp = (*notify)(inp, EHOSTDOWN);
		goto out;
	}
	off += sizeof(struct tcp_ports);
	if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) {
		goto out;
	}
	m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq);
	if (inp != NULL) {
		if (!(inp->inp_flags & INP_TIMEWAIT) &&
		    !(inp->inp_flags & INP_DROPPED) &&
		    !(inp->inp_socket == NULL)) {
			tp = intotcpcb(inp);
			if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) &&
			    SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) {
				if (cmd == PRC_MSGSIZE) {
					/*
					 * MTU discovery:
					 * If we got a needfrag set the MTU
					 * in the route to the suggested new
					 * value (if given) and then notify.
					 */
					mtu = ntohl(icmp6->icmp6_mtu);
					/*
					 * If no alternative MTU was
					 * proposed, or the proposed
					 * MTU was too small, set to
					 * the min.
					 */
					if (mtu < IPV6_MMTU)
						mtu = IPV6_MMTU - 8;
					bzero(&inc, sizeof(inc));
					inc.inc_fibnum = M_GETFIB(m);
					inc.inc_flags |= INC_ISIPV6;
					inc.inc6_faddr = *dst;
					if (in6_setscope(&inc.inc6_faddr,
					    m->m_pkthdr.rcvif, NULL))
						goto out;
					/*
					 * Only process the offered MTU if it
					 * is smaller than the current one.
					 */
					if (mtu < tp->t_maxseg +
					    sizeof (struct tcphdr) +
					    sizeof (struct ip6_hdr)) {
						tcp_hc_updatemtu(&inc, mtu);
						tcp_mtudisc(inp, mtu);
						ICMP6STAT_INC(icp6s_pmtuchg);
					}
				} else
					inp = (*notify)(inp,
					    inet6ctlerrmap[cmd]);
			}
		}
	} else {
		bzero(&inc, sizeof(inc));
		inc.inc_fibnum = M_GETFIB(m);
		inc.inc_flags |= INC_ISIPV6;
		inc.inc_fport = t_ports.th_dport;
		inc.inc_lport = t_ports.th_sport;
		inc.inc6_faddr = *dst;
		inc.inc6_laddr = ip6->ip6_src;
		syncache_unreach(&inc, icmp_tcp_seq);
	}
out:
	if (inp != NULL)
		INP_WUNLOCK(inp);
	INP_INFO_RUNLOCK(&V_tcbinfo);
}
#endif /* INET6 */
/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used, with only small modifications.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * As reading the *exact* system time is too expensive to be done
 * whenever setting up a TCP connection, we increment the time
 * offset in two ways.  First, a small random positive increment
 * is added to isn_offset for each connection that is set up.
 * Second, the function tcp_isn_tick fires once per clock tick
 * and increments isn_offset as necessary so that sequence numbers
 * are incremented at approximately ISN_BYTES_PER_SECOND.  The
 * random positive increments serve only to ensure that the same
 * exact sequence number is never sent out twice (as could otherwise
 * happen when a port is recycled in less than the system tick
 * interval.)
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 *
 * Locking of the global variables isn_secret, isn_last_reseed, isn_offset,
 * isn_offset_old, and isn_ctx is performed using the TCP pcbinfo lock.  In
 * general, this means holding an exclusive (write) lock.
 */

#define ISN_BYTES_PER_SECOND 1048576
#define ISN_STATIC_INCREMENT 4096
#define ISN_RANDOM_INCREMENT (4096 - 1)

static VNET_DEFINE(u_char, isn_secret[32]);
static VNET_DEFINE(int, isn_last);
static VNET_DEFINE(int, isn_last_reseed);
static VNET_DEFINE(u_int32_t, isn_offset);
static VNET_DEFINE(u_int32_t, isn_offset_old);

#define	V_isn_secret			VNET(isn_secret)
#define	V_isn_last			VNET(isn_last)
#define	V_isn_last_reseed		VNET(isn_last_reseed)
#define	V_isn_offset			VNET(isn_offset)
#define	V_isn_offset_old		VNET(isn_offset_old)
2413 tcp_new_isn(struct tcpcb *tp)
2416 u_int32_t md5_buffer[4];
2418 u_int32_t projected_offset;
	INP_WLOCK_ASSERT(tp->t_inpcb);

	/* Seed if this is the first use, reseed if requested. */
	if ((V_isn_last_reseed == 0) || ((V_tcp_isn_reseed_interval > 0) &&
	    (((u_int)V_isn_last_reseed + (u_int)V_tcp_isn_reseed_interval*hz)
	    < (u_int)ticks))) {
		read_random(&V_isn_secret, sizeof(V_isn_secret));
		V_isn_last_reseed = ticks;
	}
	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
		    sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
		    sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
		    sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
		    sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &V_isn_secret, sizeof(V_isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	V_isn_offset += ISN_STATIC_INCREMENT +
	    (arc4random() & ISN_RANDOM_INCREMENT);
	if (ticks != V_isn_last) {
		projected_offset = V_isn_offset_old +
		    ISN_BYTES_PER_SECOND / hz * (ticks - V_isn_last);
		if (SEQ_GT(projected_offset, V_isn_offset))
			V_isn_offset = projected_offset;
		V_isn_offset_old = V_isn_offset;
		V_isn_last = ticks;
	}
	new_isn += V_isn_offset;
	return (new_isn);
}
/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
static struct inpcb *
tcp_drop_syn_sent(struct inpcb *inp, int errno)
{
	struct tcpcb *tp;

	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	if ((inp->inp_flags & INP_TIMEWAIT) ||
	    (inp->inp_flags & INP_DROPPED))
		return (inp);

	tp = intotcpcb(inp);
	if (tp->t_state != TCPS_SYN_SENT)
		return (inp);

	if (IS_FASTOPEN(tp->t_flags))
		tcp_fastopen_disable_path(tp);

	tp = tcp_drop(tp, errno);
	if (tp != NULL)
		return (inp);
	else
		return (NULL);
}
/*
 * When a `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value.  Also nudge TCP to send something, since we
 * know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
static struct inpcb *
tcp_mtudisc_notify(struct inpcb *inp, int error)
{

	tcp_mtudisc(inp, -1);
	return (inp);
}
static void
tcp_mtudisc(struct inpcb *inp, int mtuoffer)
{
	struct tcpcb *tp;
	struct socket *so;

	INP_WLOCK_ASSERT(inp);
	if ((inp->inp_flags & INP_TIMEWAIT) ||
	    (inp->inp_flags & INP_DROPPED))
		return;

	tp = intotcpcb(inp);
	KASSERT(tp != NULL, ("tcp_mtudisc: tp == NULL"));

	tcp_mss_update(tp, -1, mtuoffer, NULL, NULL);

	so = inp->inp_socket;
	SOCKBUF_LOCK(&so->so_snd);
	/* If the mss is larger than the socket buffer, decrease the mss. */
	if (so->so_snd.sb_hiwat < tp->t_maxseg)
		tp->t_maxseg = so->so_snd.sb_hiwat;
	SOCKBUF_UNLOCK(&so->so_snd);

	TCPSTAT_INC(tcps_mturesent);
	tp->t_rtttime = 0;
	tp->snd_nxt = tp->snd_una;
	tcp_free_sackholes(tp);
	tp->snd_recover = tp->snd_max;
	if (tp->t_flags & TF_SACK_PERMIT)
		EXIT_FASTRECOVERY(tp->t_flags);
	tp->t_fb->tfb_tcp_output(tp);
}
/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then return 0.  This routine
 * is called by TCP routines that access the rmx structure and by
 * tcp_mss_update to get the peer/interface MTU.
 */
uint32_t
tcp_maxmtu(struct in_conninfo *inc, struct tcp_ifcap *cap)
{
	struct nhop4_extended nh4;
	struct ifnet *ifp;
	uint32_t maxmtu = 0;

	KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));

	if (inc->inc_faddr.s_addr != INADDR_ANY) {
		if (fib4_lookup_nh_ext(inc->inc_fibnum, inc->inc_faddr,
		    NHR_REF, 0, &nh4) != 0)
			return (0);

		ifp = nh4.nh_ifp;
		maxmtu = nh4.nh_mtu;

		/* Report additional interface capabilities. */
		if (cap != NULL) {
			if (ifp->if_capenable & IFCAP_TSO4 &&
			    ifp->if_hwassist & CSUM_TSO) {
				cap->ifcap |= CSUM_TSO;
				cap->tsomax = ifp->if_hw_tsomax;
				cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount;
				cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
			}
		}
		fib4_free_nh_ext(inc->inc_fibnum, &nh4);
	}

	return (maxmtu);
}
uint32_t
tcp_maxmtu6(struct in_conninfo *inc, struct tcp_ifcap *cap)
{
	struct nhop6_extended nh6;
	struct in6_addr dst6;
	uint32_t scopeid;
	struct ifnet *ifp;
	uint32_t maxmtu = 0;

	KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));

	if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
		in6_splitscope(&inc->inc6_faddr, &dst6, &scopeid);
		if (fib6_lookup_nh_ext(inc->inc_fibnum, &dst6, scopeid, 0,
		    NHR_REF, &nh6) != 0)
			return (0);

		ifp = nh6.nh_ifp;
		maxmtu = nh6.nh_mtu;

		/* Report additional interface capabilities. */
		if (cap != NULL) {
			if (ifp->if_capenable & IFCAP_TSO6 &&
			    ifp->if_hwassist & CSUM_TSO) {
				cap->ifcap |= CSUM_TSO;
				cap->tsomax = ifp->if_hw_tsomax;
				cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount;
				cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
			}
		}
		fib6_free_nh_ext(inc->inc_fibnum, &nh6);
	}

	return (maxmtu);
}
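/*
 * Usage sketch (illustrative; the real consumer is tcp_mss_update() in
 * tcp_input.c, and "min_protoh" here stands for the family's fixed
 * IP + TCP header size): callers pick the per-family helper from the
 * connection's in_conninfo and derive an MSS bound from the result.
 *
 *	struct tcp_ifcap cap = { 0 };
 *	uint32_t maxmtu;
 *
 *	maxmtu = (inc->inc_flags & INC_ISIPV6) ?
 *	    tcp_maxmtu6(inc, &cap) : tcp_maxmtu(inc, &cap);
 *	if (maxmtu > 0)
 *		mss = min(mss, maxmtu - min_protoh);
 */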
/*
 * Calculate the effective SMSS per the RFC 5681 definition for a given
 * TCP connection at its current state, taking SACK and other options
 * into account.
 */
u_int
tcp_maxseg(const struct tcpcb *tp)
{
	u_int optlen;

	if (tp->t_flags & TF_NOOPT)
		return (tp->t_maxseg);

	/*
	 * Here we have simplified code from tcp_addoptions(),
	 * without a proper loop, and with most of the paddings hardcoded.
	 * We might make mistakes with padding here in some edge cases,
	 * but this is harmless, since the result of tcp_maxseg() is used
	 * only in cwnd and ssthresh estimations.
	 */
#define	PAD(len)	((((len) / 4) + !!((len) % 4)) * 4)
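	/*
	 * For illustration: PAD() rounds an option length up to the next
	 * 32-bit boundary, so PAD(TCPOLEN_WINDOW) = PAD(3) = 4 and
	 * PAD(TCPOLEN_SACK_PERMITTED) = PAD(2) = 4, while an already
	 * aligned length is unchanged, e.g. PAD(12) = 12.
	 */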
	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
		if (tp->t_flags & TF_RCVD_TSTMP)
			optlen = TCPOLEN_TSTAMP_APPA;
		else
			optlen = 0;
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		if (tp->t_flags & TF_SIGNATURE)
			optlen += PAD(TCPOLEN_SIGNATURE);
#endif
		if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks > 0) {
			optlen += TCPOLEN_SACKHDR;
			optlen += tp->rcv_numsacks * TCPOLEN_SACK;
			optlen = PAD(optlen);
		}
	} else {
		if (tp->t_flags & TF_REQ_TSTMP)
			optlen = TCPOLEN_TSTAMP_APPA;
		else
			optlen = PAD(TCPOLEN_MAXSEG);
		if (tp->t_flags & TF_REQ_SCALE)
			optlen += PAD(TCPOLEN_WINDOW);
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		if (tp->t_flags & TF_SIGNATURE)
			optlen += PAD(TCPOLEN_SIGNATURE);
#endif
		if (tp->t_flags & TF_SACK_PERMIT)
			optlen += PAD(TCPOLEN_SACK_PERMITTED);
	}
#undef PAD
	optlen = min(optlen, TCP_MAXOLEN);
	return (tp->t_maxseg - optlen);
}
static int
sysctl_drop(SYSCTL_HANDLER_ARGS)
{
	/* addrs[0] is a foreign socket, addrs[1] is a local one. */
	struct sockaddr_storage addrs[2];
	struct inpcb *inp;
	struct tcpcb *tp;
	struct tcptw *tw;
	struct sockaddr_in *fin, *lin;
#ifdef INET6
	struct sockaddr_in6 *fin6, *lin6;
#endif
	int error;

	inp = NULL;
	fin = lin = NULL;
#ifdef INET6
	fin6 = lin6 = NULL;
#endif
	error = 0;

	if (req->oldptr != NULL || req->oldlen != 0)
		return (EINVAL);
	if (req->newptr == NULL)
		return (EPERM);
	if (req->newlen < sizeof(addrs))
		return (ENOMEM);
	error = SYSCTL_IN(req, &addrs, sizeof(addrs));
	if (error != 0)
		return (error);
	switch (addrs[0].ss_family) {
#ifdef INET6
	case AF_INET6:
		fin6 = (struct sockaddr_in6 *)&addrs[0];
		lin6 = (struct sockaddr_in6 *)&addrs[1];
		if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
		    lin6->sin6_len != sizeof(struct sockaddr_in6))
			return (EINVAL);
		if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
			if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
				return (EINVAL);
			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
			fin = (struct sockaddr_in *)&addrs[0];
			lin = (struct sockaddr_in *)&addrs[1];
			break;
		}
		error = sa6_embedscope(fin6, V_ip6_use_defzone);
		if (error)
			return (error);
		error = sa6_embedscope(lin6, V_ip6_use_defzone);
		if (error)
			return (error);
		break;
#endif
#ifdef INET
	case AF_INET:
		fin = (struct sockaddr_in *)&addrs[0];
		lin = (struct sockaddr_in *)&addrs[1];
		if (fin->sin_len != sizeof(struct sockaddr_in) ||
		    lin->sin_len != sizeof(struct sockaddr_in))
			return (EINVAL);
		break;
#endif
	default:
		return (EINVAL);
	}
	INP_INFO_RLOCK(&V_tcbinfo);
	switch (addrs[0].ss_family) {
#ifdef INET6
	case AF_INET6:
		inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr,
		    fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port,
		    INPLOOKUP_WLOCKPCB, NULL);
		break;
#endif
#ifdef INET
	case AF_INET:
		inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port,
		    lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL);
		break;
#endif
	}
	if (inp != NULL) {
		if (inp->inp_flags & INP_TIMEWAIT) {
			/*
			 * XXXRW: There currently exists a state where an
			 * inpcb is present, but its timewait state has been
			 * discarded.  For now, don't allow dropping of this
			 * type of inpcb.
			 */
			tw = intotw(inp);
			if (tw != NULL)
				tcp_twclose(tw, 0);
			else
				INP_WUNLOCK(inp);
		} else if (!(inp->inp_flags & INP_DROPPED) &&
		    !(inp->inp_socket->so_options & SO_ACCEPTCONN)) {
			tp = intotcpcb(inp);
			tp = tcp_drop(tp, ECONNABORTED);
			if (tp != NULL)
				INP_WUNLOCK(inp);
		} else
			INP_WUNLOCK(inp);
	} else
		error = ESRCH;
	INP_INFO_RUNLOCK(&V_tcbinfo);
	return (error);
}
SYSCTL_PROC(_net_inet_tcp, TCPCTL_DROP, drop,
    CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP, NULL,
    0, sysctl_drop, "", "Drop TCP connection");
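/*
 * Userland sketch (for illustration; tcpdrop(8) is the stock consumer
 * of this OID): the handler expects exactly two sockaddrs, foreign
 * then local, written to the write-only node.
 *
 *	struct sockaddr_storage addrs[2];
 *
 *	(fill addrs[0] = foreign and addrs[1] = local, with correct
 *	 sa_len/sa_family and ports in network byte order)
 *
 *	if (sysctlbyname("net.inet.tcp.drop", NULL, NULL,
 *	    &addrs, sizeof(addrs)) == -1)
 *		err(1, "net.inet.tcp.drop");
 */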
/*
 * Generate a standardized TCP log line for use throughout the
 * tcp subsystem.  Memory allocation is done with M_NOWAIT to
 * allow use in interrupt context.
 *
 * NB: The caller MUST free(s, M_TCPLOG) the returned string.
 * NB: The function may return NULL if memory allocation failed.
 *
 * Due to header inclusion and ordering limitations the struct ip
 * and ip6_hdr pointers have to be passed as void pointers.
 */
char *
tcp_log_vain(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
    const void *ip6hdr)
{

	/* Is logging enabled? */
	if (tcp_log_in_vain == 0)
		return (NULL);

	return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
}
char *
tcp_log_addrs(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
    const void *ip6hdr)
{

	/* Is logging enabled? */
	if (tcp_log_debug == 0)
		return (NULL);

	return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
}
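/*
 * Caller sketch (illustrative; this mirrors how the input path uses
 * these helpers): tolerate NULL, and free the result with M_TCPLOG.
 *
 *	char *s;
 *
 *	if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL)) != NULL) {
 *		log(LOG_DEBUG, "%s; %s: connection attempt rejected\n",
 *		    s, __func__);
 *		free(s, M_TCPLOG);
 *	}
 */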
static char *
tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
    const void *ip6hdr)
{
	char *s, *sp;
	size_t size;
	struct ip *ip;
#ifdef INET6
	const struct ip6_hdr *ip6;

	ip6 = (const struct ip6_hdr *)ip6hdr;
#endif /* INET6 */
	ip = (struct ip *)ip4hdr;
	/*
	 * The log line looks like this:
	 * "TCP: [1.2.3.4]:50332 to [1.2.3.4]:80 tcpflags 0x2<SYN>"
	 */
	size = sizeof("TCP: []:12345 to []:12345 tcpflags 0x2<>") +
	    sizeof(PRINT_TH_FLAGS) + 1 +
#ifdef INET6
	    2 * INET6_ADDRSTRLEN;
#else
	    2 * INET_ADDRSTRLEN;
#endif /* INET6 */
	s = malloc(size, M_TCPLOG, M_ZERO|M_NOWAIT);
	if (s == NULL)
		return (NULL);

	strcat(s, "TCP: [");
	sp = s + strlen(s);

	if (inc && ((inc->inc_flags & INC_ISIPV6) == 0)) {
		inet_ntoa_r(inc->inc_faddr, sp);
		sp = s + strlen(s);
		sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
		sp = s + strlen(s);
		inet_ntoa_r(inc->inc_laddr, sp);
		sp = s + strlen(s);
		sprintf(sp, "]:%i", ntohs(inc->inc_lport));
#ifdef INET6
	} else if (inc) {
		ip6_sprintf(sp, &inc->inc6_faddr);
		sp = s + strlen(s);
		sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
		sp = s + strlen(s);
		ip6_sprintf(sp, &inc->inc6_laddr);
		sp = s + strlen(s);
		sprintf(sp, "]:%i", ntohs(inc->inc_lport));
	} else if (ip6 && th) {
		ip6_sprintf(sp, &ip6->ip6_src);
		sp = s + strlen(s);
		sprintf(sp, "]:%i to [", ntohs(th->th_sport));
		sp = s + strlen(s);
		ip6_sprintf(sp, &ip6->ip6_dst);
		sp = s + strlen(s);
		sprintf(sp, "]:%i", ntohs(th->th_dport));
#endif /* INET6 */
#ifdef INET
	} else if (ip && th) {
		inet_ntoa_r(ip->ip_src, sp);
		sp = s + strlen(s);
		sprintf(sp, "]:%i to [", ntohs(th->th_sport));
		sp = s + strlen(s);
		inet_ntoa_r(ip->ip_dst, sp);
		sp = s + strlen(s);
		sprintf(sp, "]:%i", ntohs(th->th_dport));
#endif /* INET */
	} else {
		free(s, M_TCPLOG);
		return (NULL);
	}
	sp = s + strlen(s);
	if (th)
		sprintf(sp, " tcpflags 0x%b", th->th_flags, PRINT_TH_FLAGS);
	if (*(s + size - 1) != '\0')
		panic("%s: string too long", __func__);
	return (s);
}
/*
 * A subroutine which makes it easy to track TCP state changes with DTrace.
 * This function shouldn't be called for t_state initializations that don't
 * correspond to actual TCP state transitions.
 */
void
tcp_state_change(struct tcpcb *tp, int newstate)
{
#if defined(KDTRACE_HOOKS)
	int pstate = tp->t_state;
#endif

	TCPSTATES_DEC(tp->t_state);
	TCPSTATES_INC(newstate);
	tp->t_state = newstate;
	TCP_PROBE6(state__change, NULL, tp, NULL, tp, NULL, pstate);
}
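/*
 * Observability sketch (illustrative): the probe fired above is exposed
 * through the DTrace TCP provider, so state transitions can be watched
 * from userland without modifying the kernel, e.g.:
 *
 *	dtrace -n 'tcp:::state-change { trace(timestamp); }'
 *
 * The probe's argument layout is defined by the provider translators,
 * not by this function.
 */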
/*
 * Create an external-format (``xtcpcb'') structure using the information in
 * the kernel-format tcpcb structure pointed to by tp.  This is done to
 * reduce the spew of irrelevant information over this interface, to isolate
 * user code from changes in the kernel structure, and potentially to provide
 * information-hiding if we decide that some of this information should be
 * hidden from users.
 */
void
tcp_inptoxtp(const struct inpcb *inp, struct xtcpcb *xt)
{
	struct tcpcb *tp = intotcpcb(inp);
	sbintime_t now;

	if (inp->inp_flags & INP_TIMEWAIT) {
		bzero(xt, sizeof(struct xtcpcb));
		xt->t_state = TCPS_TIME_WAIT;
	} else {
		xt->t_state = tp->t_state;
		xt->t_logstate = tp->t_logstate;
		xt->t_flags = tp->t_flags;
		xt->t_sndzerowin = tp->t_sndzerowin;
		xt->t_sndrexmitpack = tp->t_sndrexmitpack;
		xt->t_rcvoopack = tp->t_rcvoopack;

		now = getsbinuptime();
#define	COPYTIMER(ttt)	do {						\
		if (callout_active(&tp->t_timers->ttt))			\
			xt->ttt = (tp->t_timers->ttt.c_time - now) /	\
			    SBT_1MS;					\
		else							\
			xt->ttt = 0;					\
} while (0)
		COPYTIMER(tt_delack);
		COPYTIMER(tt_rexmt);
		COPYTIMER(tt_persist);
		COPYTIMER(tt_keep);
		COPYTIMER(tt_2msl);
#undef COPYTIMER
		xt->t_rcvtime = 1000 * (ticks - tp->t_rcvtime) / hz;

		bcopy(tp->t_fb->tfb_tcp_block_name, xt->xt_stack,
		    TCP_FUNCTION_NAME_LEN_MAX);
		bzero(xt->xt_logid, TCP_LOG_ID_LEN);
#ifdef TCP_BLACKBOX
		(void)tcp_log_get_id(tp, xt->xt_logid);
#endif
	}

	xt->xt_len = sizeof(struct xtcpcb);
	in_pcbtoxinpcb(inp, &xt->xt_inp);
	if (inp->inp_socket == NULL)
		xt->xt_inp.xi_socket.xso_protocol = IPPROTO_TCP;
}