2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
5 * The Regents of the University of California. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
38 #include "opt_inet6.h"
39 #include "opt_ipsec.h"
40 #include "opt_kern_tls.h"
41 #include "opt_tcpdebug.h"
43 #include <sys/param.h>
44 #include <sys/systm.h>
46 #include <sys/callout.h>
47 #include <sys/eventhandler.h>
49 #include <sys/hhook.h>
51 #include <sys/kernel.h>
53 #include <sys/khelp.h>
58 #include <sys/qmath.h>
59 #include <sys/stats.h>
60 #include <sys/sysctl.h>
62 #include <sys/malloc.h>
63 #include <sys/refcount.h>
66 #include <sys/domain.h>
71 #include <sys/socket.h>
72 #include <sys/socketvar.h>
73 #include <sys/protosw.h>
74 #include <sys/random.h>
78 #include <net/route.h>
79 #include <net/route/nhop.h>
81 #include <net/if_var.h>
84 #include <netinet/in.h>
85 #include <netinet/in_fib.h>
86 #include <netinet/in_kdtrace.h>
87 #include <netinet/in_pcb.h>
88 #include <netinet/in_systm.h>
89 #include <netinet/in_var.h>
90 #include <netinet/ip.h>
91 #include <netinet/ip_icmp.h>
92 #include <netinet/ip_var.h>
94 #include <netinet/icmp6.h>
95 #include <netinet/ip6.h>
96 #include <netinet6/in6_fib.h>
97 #include <netinet6/in6_pcb.h>
98 #include <netinet6/ip6_var.h>
99 #include <netinet6/scope6_var.h>
100 #include <netinet6/nd6.h>
103 #include <netinet/tcp.h>
104 #include <netinet/tcp_fsm.h>
105 #include <netinet/tcp_seq.h>
106 #include <netinet/tcp_timer.h>
107 #include <netinet/tcp_var.h>
108 #include <netinet/tcp_log_buf.h>
109 #include <netinet/tcp_syncache.h>
110 #include <netinet/tcp_hpts.h>
111 #include <netinet/cc/cc.h>
113 #include <netinet6/tcp6_var.h>
115 #include <netinet/tcpip.h>
116 #include <netinet/tcp_fastopen.h>
118 #include <netinet/tcp_pcap.h>
121 #include <netinet/tcp_debug.h>
124 #include <netinet6/ip6protosw.h>
127 #include <netinet/tcp_offload.h>
130 #include <netipsec/ipsec_support.h>
132 #include <machine/in_cksum.h>
133 #include <crypto/siphash/siphash.h>
135 #include <security/mac/mac_framework.h>
137 VNET_DEFINE(int, tcp_mssdflt) = TCP_MSS;
139 VNET_DEFINE(int, tcp_v6mssdflt) = TCP6_MSS;
142 #ifdef NETFLIX_EXP_DETECTION
143 /* Sack attack detection thresholds and such */
144 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, sack_attack,
145 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
146 "Sack Attack detection thresholds");
147 int32_t tcp_force_detection = 0;
148 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, force_detection,
150 &tcp_force_detection, 0,
151 "Do we force detection even if the INP has it off?");
152 int32_t tcp_sack_to_ack_thresh = 700; /* 70 % */
153 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sack_to_ack_thresh,
155 &tcp_sack_to_ack_thresh, 700,
156 "Percentage of sacks to acks we must see above (10.1 percent is 101)?");
157 int32_t tcp_sack_to_move_thresh = 600; /* 60 % */
158 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, move_thresh,
160 &tcp_sack_to_move_thresh, 600,
161 "Percentage of sack moves we must see above (10.1 percent is 101)");
162 int32_t tcp_restoral_thresh = 650; /* 65 % (sack:2:ack -5%) */
163 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, restore_thresh,
165 &tcp_restoral_thresh, 550,
166 "Percentage of sack to ack percentage we must see below to restore(10.1 percent is 101)");
167 int32_t tcp_sad_decay_val = 800;
168 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, decay_per,
170 &tcp_sad_decay_val, 800,
171 "The decay percentage (10.1 percent equals 101 )");
172 int32_t tcp_map_minimum = 500;
173 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, nummaps,
175 &tcp_map_minimum, 500,
176 "Number of Map enteries before we start detection");
177 int32_t tcp_attack_on_turns_on_logging = 0;
178 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, attacks_logged,
180 &tcp_attack_on_turns_on_logging, 0,
181 "When we have a positive hit on attack, do we turn on logging?");
182 int32_t tcp_sad_pacing_interval = 2000;
183 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sad_pacing_int,
185 &tcp_sad_pacing_interval, 2000,
186 "What is the minimum pacing interval for a classified attacker?");
188 int32_t tcp_sad_low_pps = 100;
189 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sad_low_pps,
191 &tcp_sad_low_pps, 100,
192 "What is the input pps that below which we do not decay?");
195 struct rwlock tcp_function_lock;
198 sysctl_net_inet_tcp_mss_check(SYSCTL_HANDLER_ARGS)
203 error = sysctl_handle_int(oidp, &new, 0, req);
204 if (error == 0 && req->newptr) {
205 if (new < TCP_MINMSS)
213 SYSCTL_PROC(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt,
214 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
215 &VNET_NAME(tcp_mssdflt), 0, &sysctl_net_inet_tcp_mss_check, "I",
216 "Default TCP Maximum Segment Size");
220 sysctl_net_inet_tcp_mss_v6_check(SYSCTL_HANDLER_ARGS)
224 new = V_tcp_v6mssdflt;
225 error = sysctl_handle_int(oidp, &new, 0, req);
226 if (error == 0 && req->newptr) {
227 if (new < TCP_MINMSS)
230 V_tcp_v6mssdflt = new;
235 SYSCTL_PROC(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
236 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
237 &VNET_NAME(tcp_v6mssdflt), 0, &sysctl_net_inet_tcp_mss_v6_check, "I",
238 "Default TCP Maximum Segment Size for IPv6");
242 * Minimum MSS we accept and use. This prevents DoS attacks where
243 * we are forced to a ridiculously low MSS like 20 and send hundreds
244 * of packets instead of one. The effect scales with the available
245 * bandwidth and quickly saturates the CPU and network interface
246 * with packet generation and sending. Set to zero to disable MINMSS
247 * checking. This setting prevents us from sending packets that are too small.
249 VNET_DEFINE(int, tcp_minmss) = TCP_MINMSS;
250 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_VNET | CTLFLAG_RW,
251 &VNET_NAME(tcp_minmss), 0,
252 "Minimum TCP Maximum Segment Size");
254 VNET_DEFINE(int, tcp_do_rfc1323) = 1;
255 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_VNET | CTLFLAG_RW,
256 &VNET_NAME(tcp_do_rfc1323), 0,
257 "Enable rfc1323 (high performance TCP) extensions");
259 VNET_DEFINE(int, tcp_ts_offset_per_conn) = 1;
260 SYSCTL_INT(_net_inet_tcp, OID_AUTO, ts_offset_per_conn, CTLFLAG_VNET | CTLFLAG_RW,
261 &VNET_NAME(tcp_ts_offset_per_conn), 0,
262 "Initialize TCP timestamps per connection instead of per host pair");
264 static int tcp_log_debug = 0;
265 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_debug, CTLFLAG_RW,
266 &tcp_log_debug, 0, "Log errors caused by incoming TCP segments");
268 static int tcp_tcbhashsize;
269 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
270 &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
272 static int do_tcpdrain = 1;
273 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
274 "Enable tcp_drain routine for extra help when low on mbufs");
276 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_VNET | CTLFLAG_RD,
277 &VNET_NAME(tcbinfo.ipi_count), 0, "Number of active PCBs");
279 VNET_DEFINE_STATIC(int, icmp_may_rst) = 1;
280 #define V_icmp_may_rst VNET(icmp_may_rst)
281 SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_VNET | CTLFLAG_RW,
282 &VNET_NAME(icmp_may_rst), 0,
283 "Certain ICMP unreachable messages may abort connections in SYN_SENT");
285 VNET_DEFINE_STATIC(int, tcp_isn_reseed_interval) = 0;
286 #define V_tcp_isn_reseed_interval VNET(tcp_isn_reseed_interval)
287 SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_VNET | CTLFLAG_RW,
288 &VNET_NAME(tcp_isn_reseed_interval), 0,
289 "Seconds between reseeding of ISN secret");
291 static int tcp_soreceive_stream;
292 SYSCTL_INT(_net_inet_tcp, OID_AUTO, soreceive_stream, CTLFLAG_RDTUN,
293 &tcp_soreceive_stream, 0, "Using soreceive_stream for TCP sockets");
295 VNET_DEFINE(uma_zone_t, sack_hole_zone);
296 #define V_sack_hole_zone VNET(sack_hole_zone)
297 VNET_DEFINE(uint32_t, tcp_map_entries_limit) = 0; /* unlimited */
299 sysctl_net_inet_tcp_map_limit_check(SYSCTL_HANDLER_ARGS)
304 new = V_tcp_map_entries_limit;
305 error = sysctl_handle_int(oidp, &new, 0, req);
306 if (error == 0 && req->newptr) {
307 /* only allow "0" and value > minimum */
308 if (new > 0 && new < TCP_MIN_MAP_ENTRIES_LIMIT)
311 V_tcp_map_entries_limit = new;
315 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, map_limit,
316 CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
317 &VNET_NAME(tcp_map_entries_limit), 0,
318 &sysctl_net_inet_tcp_map_limit_check, "IU",
319 "Total sendmap entries limit");
321 VNET_DEFINE(uint32_t, tcp_map_split_limit) = 0; /* unlimited */
322 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, split_limit, CTLFLAG_VNET | CTLFLAG_RW,
323 &VNET_NAME(tcp_map_split_limit), 0,
324 "Total sendmap split entries limit");
327 VNET_DEFINE(struct hhook_head *, tcp_hhh[HHOOK_TCP_LAST+1]);
330 #define TS_OFFSET_SECRET_LENGTH SIPHASH_KEY_LENGTH
331 VNET_DEFINE_STATIC(u_char, ts_offset_secret[TS_OFFSET_SECRET_LENGTH]);
332 #define V_ts_offset_secret VNET(ts_offset_secret)
334 static int tcp_default_fb_init(struct tcpcb *tp);
335 static void tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged);
336 static int tcp_default_handoff_ok(struct tcpcb *tp);
337 static struct inpcb *tcp_notify(struct inpcb *, int);
338 static struct inpcb *tcp_mtudisc_notify(struct inpcb *, int);
339 static void tcp_mtudisc(struct inpcb *, int);
340 static char * tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th,
341 void *ip4hdr, const void *ip6hdr);
343 static struct tcp_function_block tcp_def_funcblk = {
344 .tfb_tcp_block_name = "freebsd",
345 .tfb_tcp_output = tcp_output,
346 .tfb_tcp_do_segment = tcp_do_segment,
347 .tfb_tcp_ctloutput = tcp_default_ctloutput,
348 .tfb_tcp_handoff_ok = tcp_default_handoff_ok,
349 .tfb_tcp_fb_init = tcp_default_fb_init,
350 .tfb_tcp_fb_fini = tcp_default_fb_fini,
353 static int tcp_fb_cnt = 0;
354 struct tcp_funchead t_functions;
355 static struct tcp_function_block *tcp_func_set_ptr = &tcp_def_funcblk;
357 static struct tcp_function_block *
358 find_tcp_functions_locked(struct tcp_function_set *fs)
360 struct tcp_function *f;
361 struct tcp_function_block *blk=NULL;
363 TAILQ_FOREACH(f, &t_functions, tf_next) {
364 if (strcmp(f->tf_name, fs->function_set_name) == 0) {
372 static struct tcp_function_block *
373 find_tcp_fb_locked(struct tcp_function_block *blk, struct tcp_function **s)
375 struct tcp_function_block *rblk=NULL;
376 struct tcp_function *f;
378 TAILQ_FOREACH(f, &t_functions, tf_next) {
379 if (f->tf_fb == blk) {
390 struct tcp_function_block *
391 find_and_ref_tcp_functions(struct tcp_function_set *fs)
393 struct tcp_function_block *blk;
395 rw_rlock(&tcp_function_lock);
396 blk = find_tcp_functions_locked(fs);
398 refcount_acquire(&blk->tfb_refcnt);
399 rw_runlock(&tcp_function_lock);
403 struct tcp_function_block *
404 find_and_ref_tcp_fb(struct tcp_function_block *blk)
406 struct tcp_function_block *rblk;
408 rw_rlock(&tcp_function_lock);
409 rblk = find_tcp_fb_locked(blk, NULL);
411 refcount_acquire(&rblk->tfb_refcnt);
412 rw_runlock(&tcp_function_lock);
416 static struct tcp_function_block *
417 find_and_ref_tcp_default_fb(void)
419 struct tcp_function_block *rblk;
421 rw_rlock(&tcp_function_lock);
422 rblk = tcp_func_set_ptr;
423 refcount_acquire(&rblk->tfb_refcnt);
424 rw_runlock(&tcp_function_lock);
429 tcp_switch_back_to_default(struct tcpcb *tp)
431 struct tcp_function_block *tfb;
433 KASSERT(tp->t_fb != &tcp_def_funcblk,
434 ("%s: called by the built-in default stack", __func__));
437 * Release the old stack. This function will either find a new one
440 if (tp->t_fb->tfb_tcp_fb_fini != NULL)
441 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 0);
442 refcount_release(&tp->t_fb->tfb_refcnt);
445 * Now, we'll find a new function block to use.
446 * Start by trying the current user-selected
447 * default, unless this stack is the user-selected
450 tfb = find_and_ref_tcp_default_fb();
451 if (tfb == tp->t_fb) {
452 refcount_release(&tfb->tfb_refcnt);
455 /* Does the stack accept this connection? */
456 if (tfb != NULL && tfb->tfb_tcp_handoff_ok != NULL &&
457 (*tfb->tfb_tcp_handoff_ok)(tp)) {
458 refcount_release(&tfb->tfb_refcnt);
461 /* Try to use that stack. */
463 /* Initialize the new stack. If it succeeds, we are done. */
465 if (tp->t_fb->tfb_tcp_fb_init == NULL ||
466 (*tp->t_fb->tfb_tcp_fb_init)(tp) == 0)
470 * Initialization failed. Release the reference count on
473 refcount_release(&tfb->tfb_refcnt);
477 * If that wasn't feasible, use the built-in default
478 * stack which is not allowed to reject anyone.
480 tfb = find_and_ref_tcp_fb(&tcp_def_funcblk);
482 /* there always should be a default */
483 panic("Can't refer to tcp_def_funcblk");
485 if (tfb->tfb_tcp_handoff_ok != NULL) {
486 if ((*tfb->tfb_tcp_handoff_ok) (tp)) {
487 /* The default stack cannot say no */
488 panic("Default stack rejects a new session?");
492 if (tp->t_fb->tfb_tcp_fb_init != NULL &&
493 (*tp->t_fb->tfb_tcp_fb_init)(tp)) {
494 /* The default stack cannot fail */
495 panic("Default stack initialization failed");
500 sysctl_net_inet_default_tcp_functions(SYSCTL_HANDLER_ARGS)
503 struct tcp_function_set fs;
504 struct tcp_function_block *blk;
506 memset(&fs, 0, sizeof(fs));
507 rw_rlock(&tcp_function_lock);
508 blk = find_tcp_fb_locked(tcp_func_set_ptr, NULL);
511 strcpy(fs.function_set_name, blk->tfb_tcp_block_name);
512 fs.pcbcnt = blk->tfb_refcnt;
514 rw_runlock(&tcp_function_lock);
515 error = sysctl_handle_string(oidp, fs.function_set_name,
516 sizeof(fs.function_set_name), req);
518 /* Check for error or no change */
519 if (error != 0 || req->newptr == NULL)
522 rw_wlock(&tcp_function_lock);
523 blk = find_tcp_functions_locked(&fs);
525 (blk->tfb_flags & TCP_FUNC_BEING_REMOVED)) {
529 tcp_func_set_ptr = blk;
531 rw_wunlock(&tcp_function_lock);
535 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_default,
536 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
537 NULL, 0, sysctl_net_inet_default_tcp_functions, "A",
538 "Set/get the default TCP functions");
541 sysctl_net_inet_list_available(SYSCTL_HANDLER_ARGS)
543 int error, cnt, linesz;
544 struct tcp_function *f;
550 rw_rlock(&tcp_function_lock);
551 TAILQ_FOREACH(f, &t_functions, tf_next) {
554 rw_runlock(&tcp_function_lock);
556 bufsz = (cnt+2) * ((TCP_FUNCTION_NAME_LEN_MAX * 2) + 13) + 1;
557 buffer = malloc(bufsz, M_TEMP, M_WAITOK);
562 linesz = snprintf(cp, bufsz, "\n%-32s%c %-32s %s\n", "Stack", 'D',
563 "Alias", "PCB count");
568 rw_rlock(&tcp_function_lock);
569 TAILQ_FOREACH(f, &t_functions, tf_next) {
570 alias = (f->tf_name != f->tf_fb->tfb_tcp_block_name);
571 linesz = snprintf(cp, bufsz, "%-32s%c %-32s %u\n",
572 f->tf_fb->tfb_tcp_block_name,
573 (f->tf_fb == tcp_func_set_ptr) ? '*' : ' ',
574 alias ? f->tf_name : "-",
575 f->tf_fb->tfb_refcnt);
576 if (linesz >= bufsz) {
584 rw_runlock(&tcp_function_lock);
586 error = sysctl_handle_string(oidp, buffer, outsz + 1, req);
587 free(buffer, M_TEMP);
591 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_available,
592 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
593 NULL, 0, sysctl_net_inet_list_available, "A",
594 "list available TCP Function sets");
597 * Exports one (struct tcp_function_info) for each alias/name.
600 sysctl_net_inet_list_func_info(SYSCTL_HANDLER_ARGS)
603 struct tcp_function *f;
604 struct tcp_function_info tfi;
607 * We don't allow writes.
609 if (req->newptr != NULL)
613 * Wire the old buffer so we can directly copy the functions to
614 * user space without dropping the lock.
616 if (req->oldptr != NULL) {
617 error = sysctl_wire_old_buffer(req, 0);
623 * Walk the list and copy out matching entries. If INVARIANTS
624 * is compiled in, also walk the list to verify the length of
625 * the list matches what we have recorded.
627 rw_rlock(&tcp_function_lock);
631 if (req->oldptr == NULL) {
636 TAILQ_FOREACH(f, &t_functions, tf_next) {
640 if (req->oldptr != NULL) {
641 bzero(&tfi, sizeof(tfi));
642 tfi.tfi_refcnt = f->tf_fb->tfb_refcnt;
643 tfi.tfi_id = f->tf_fb->tfb_id;
644 (void)strlcpy(tfi.tfi_alias, f->tf_name,
645 sizeof(tfi.tfi_alias));
646 (void)strlcpy(tfi.tfi_name,
647 f->tf_fb->tfb_tcp_block_name, sizeof(tfi.tfi_name));
648 error = SYSCTL_OUT(req, &tfi, sizeof(tfi));
650 * Don't stop on error, as that is the
651 * mechanism we use to accumulate length
652 * information if the buffer was too short.
656 KASSERT(cnt == tcp_fb_cnt,
657 ("%s: cnt (%d) != tcp_fb_cnt (%d)", __func__, cnt, tcp_fb_cnt));
661 rw_runlock(&tcp_function_lock);
662 if (req->oldptr == NULL)
663 error = SYSCTL_OUT(req, NULL,
664 (cnt + 1) * sizeof(struct tcp_function_info));
669 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, function_info,
670 CTLTYPE_OPAQUE | CTLFLAG_SKIP | CTLFLAG_RD | CTLFLAG_MPSAFE,
671 NULL, 0, sysctl_net_inet_list_func_info, "S,tcp_function_info",
672 "List TCP function block name-to-ID mappings");
675 * tfb_tcp_handoff_ok() function for the default stack.
676 * Note that we'll basically try to take all comers.
679 tcp_default_handoff_ok(struct tcpcb *tp)
686 * tfb_tcp_fb_init() function for the default stack.
688 * This handles making sure we have appropriate timers set if you are
689 * transitioning a socket that has some amount of setup done.
691 * The init() function from the default can *never* return non-zero, i.e.,
692 * it is required to always succeed since it is the stack of last resort!
695 tcp_default_fb_init(struct tcpcb *tp)
700 INP_WLOCK_ASSERT(tp->t_inpcb);
702 KASSERT(tp->t_state >= 0 && tp->t_state < TCPS_TIME_WAIT,
703 ("%s: connection %p in unexpected state %d", __func__, tp,
707 * Nothing to do for ESTABLISHED or LISTEN states. And, we don't
708 * know what to do for unexpected states (which includes TIME_WAIT).
710 if (tp->t_state <= TCPS_LISTEN || tp->t_state >= TCPS_TIME_WAIT)
714 * Make sure some kind of transmission timer is set if there is
717 so = tp->t_inpcb->inp_socket;
718 if ((!TCPS_HAVEESTABLISHED(tp->t_state) || sbavail(&so->so_snd) ||
719 tp->snd_una != tp->snd_max) && !(tcp_timer_active(tp, TT_REXMT) ||
720 tcp_timer_active(tp, TT_PERSIST))) {
722 * If the session is established and it looks like it should
723 * be in the persist state, set the persist timer. Otherwise,
724 * set the retransmit timer.
726 if (TCPS_HAVEESTABLISHED(tp->t_state) && tp->snd_wnd == 0 &&
727 (int32_t)(tp->snd_nxt - tp->snd_una) <
728 (int32_t)sbavail(&so->so_snd))
731 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
734 /* All non-embryonic sessions get a keepalive timer. */
735 if (!tcp_timer_active(tp, TT_KEEP))
736 tcp_timer_activate(tp, TT_KEEP,
737 TCPS_HAVEESTABLISHED(tp->t_state) ? TP_KEEPIDLE(tp) :
744 * tfb_tcp_fb_fini() function for the default stack.
746 * This changes state as necessary (or prudent) to prepare for another stack
747 * to assume responsibility for the connection.
750 tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged)
753 INP_WLOCK_ASSERT(tp->t_inpcb);
758 * Target size of TCP PCB hash tables. Must be a power of two.
760 * Note that this can be overridden by the kernel environment
761 * variable net.inet.tcp.tcbhashsize
764 #define TCBHASHSIZE 0
769 * Callouts should be moved into struct tcp directly. They are currently
770 * separate because the tcpcb structure is exported to userland for sysctl
771 * parsing purposes, and userland does not know about callouts.
782 VNET_DEFINE_STATIC(uma_zone_t, tcpcb_zone);
783 #define V_tcpcb_zone VNET(tcpcb_zone)
785 MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers");
786 MALLOC_DEFINE(M_TCPFUNCTIONS, "tcpfunc", "TCP function set memory");
788 static struct mtx isn_mtx;
790 #define ISN_LOCK_INIT() mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF)
791 #define ISN_LOCK() mtx_lock(&isn_mtx)
792 #define ISN_UNLOCK() mtx_unlock(&isn_mtx)
795 * TCP initialization.
798 tcp_zone_change(void *tag)
801 uma_zone_set_max(V_tcbinfo.ipi_zone, maxsockets);
802 uma_zone_set_max(V_tcpcb_zone, maxsockets);
803 tcp_tw_zone_change();
807 tcp_inpcb_init(void *mem, int size, int flags)
809 struct inpcb *inp = mem;
811 INP_LOCK_INIT(inp, "inp", "tcpinp");
816 * Take a value and get the next power of 2 that doesn't overflow.
817 * Used to size the tcp_inpcb hash buckets.
820 maketcp_hashsize(int size)
826 * get the next power of 2 higher than the given size.
828 hashsize = 1 << fls(size);
829 /* catch overflow, and just go one power of 2 smaller */
830 if (hashsize < size) {
831 hashsize = 1 << (fls(size) - 1);
836 static volatile int next_tcp_stack_id = 1;
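/*
 * Illustrative sketch, not part of the build: the rounding in
 * maketcp_hashsize() above relies on fls() (find last set bit).  A
 * self-contained analogue with a portable fls() substitute rounds 600 up to
 * 1024 and backs off by one power of two if the shift overflows:
 */
#if 0	/* example only */
static int
example_fls(int v)
{
	int bits = 0;

	while (v != 0) {
		bits++;
		v >>= 1;
	}
	return (bits);
}

static int
example_hashsize(int size)
{
	int hashsize;

	hashsize = 1 << example_fls(size);
	if (hashsize < size)		/* shift overflowed; back off */
		hashsize = 1 << (example_fls(size) - 1);
	return (hashsize);
}
#endif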
839 * Register a TCP function block with the name provided in the names
840 * array. (Note that this function does NOT automatically register
841 * blk->tfb_tcp_block_name as a stack name. Therefore, you should
842 * explicitly include blk->tfb_tcp_block_name in the list of names if
843 * you wish to register the stack with that name.)
845 * Either all name registrations will succeed or all will fail. If
846 * a name registration fails, the function will update the num_names
847 * argument to point to the array index of the name that encountered
850 * Returns 0 on success, or an error code on failure.
853 register_tcp_functions_as_names(struct tcp_function_block *blk, int wait,
854 const char *names[], int *num_names)
856 struct tcp_function *n;
857 struct tcp_function_set fs;
860 KASSERT(names != NULL && *num_names > 0,
861 ("%s: Called with 0-length name list", __func__));
862 KASSERT(names != NULL, ("%s: Called with NULL name list", __func__));
863 KASSERT(rw_initialized(&tcp_function_lock),
864 ("%s: called too early", __func__));
866 if ((blk->tfb_tcp_output == NULL) ||
867 (blk->tfb_tcp_do_segment == NULL) ||
868 (blk->tfb_tcp_ctloutput == NULL) ||
869 (strlen(blk->tfb_tcp_block_name) == 0)) {
871 * These functions are required and you
877 if (blk->tfb_tcp_timer_stop_all ||
878 blk->tfb_tcp_timer_activate ||
879 blk->tfb_tcp_timer_active ||
880 blk->tfb_tcp_timer_stop) {
882 * If you define one timer function you
883 * must have them all.
885 if ((blk->tfb_tcp_timer_stop_all == NULL) ||
886 (blk->tfb_tcp_timer_activate == NULL) ||
887 (blk->tfb_tcp_timer_active == NULL) ||
888 (blk->tfb_tcp_timer_stop == NULL)) {
894 if (blk->tfb_flags & TCP_FUNC_BEING_REMOVED) {
899 refcount_init(&blk->tfb_refcnt, 0);
900 blk->tfb_id = atomic_fetchadd_int(&next_tcp_stack_id, 1);
901 for (i = 0; i < *num_names; i++) {
902 n = malloc(sizeof(struct tcp_function), M_TCPFUNCTIONS, wait);
909 (void)strlcpy(fs.function_set_name, names[i],
910 sizeof(fs.function_set_name));
911 rw_wlock(&tcp_function_lock);
912 if (find_tcp_functions_locked(&fs) != NULL) {
913 /* Duplicate name space not allowed */
914 rw_wunlock(&tcp_function_lock);
915 free(n, M_TCPFUNCTIONS);
919 (void)strlcpy(n->tf_name, names[i], sizeof(n->tf_name));
920 TAILQ_INSERT_TAIL(&t_functions, n, tf_next);
922 rw_wunlock(&tcp_function_lock);
928 * Deregister the names we just added. Because registration failed
929 * for names[i], we don't need to deregister that name.
932 rw_wlock(&tcp_function_lock);
934 TAILQ_FOREACH(n, &t_functions, tf_next) {
935 if (!strncmp(n->tf_name, names[i],
936 TCP_FUNCTION_NAME_LEN_MAX)) {
937 TAILQ_REMOVE(&t_functions, n, tf_next);
940 free(n, M_TCPFUNCTIONS);
945 rw_wunlock(&tcp_function_lock);
950 * Register a TCP function block using the name provided in the name
953 * Returns 0 on success, or an error code on failure.
956 register_tcp_functions_as_name(struct tcp_function_block *blk, const char *name,
959 const char *name_list[1];
966 name_list[0] = blk->tfb_tcp_block_name;
967 rv = register_tcp_functions_as_names(blk, wait, name_list, &num_names);
972 * Register a TCP function block using the name defined in
973 * blk->tfb_tcp_block_name.
975 * Returns 0 on success, or an error code on failure.
978 register_tcp_functions(struct tcp_function_block *blk, int wait)
981 return (register_tcp_functions_as_name(blk, NULL, wait));
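/*
 * Illustrative sketch, not part of the build: a hypothetical TCP stack
 * module registering itself under a primary name and an alias.  The
 * tcp_example_funcblk symbol and the "example" names are made up; the
 * required callbacks are filled with the default stack's functions only to
 * keep the sketch short.
 */
#if 0	/* example only */
static struct tcp_function_block tcp_example_funcblk = {
	.tfb_tcp_block_name = "example",
	.tfb_tcp_output = tcp_output,
	.tfb_tcp_do_segment = tcp_do_segment,
	.tfb_tcp_ctloutput = tcp_default_ctloutput,
};

static int
example_stack_register(void)
{
	const char *names[] = { "example", "example_alias" };
	int num_names = 2;
	int error;

	error = register_tcp_functions_as_names(&tcp_example_funcblk, M_WAITOK,
	    names, &num_names);
	/* On failure, num_names indexes the name that could not be added. */
	return (error);
}
#endif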
985 * Deregister all names associated with a function block. This
986 * functionally removes the function block from use within the system.
988 * When called with a true quiesce argument, mark the function block
989 * as being removed so no more stacks will use it and determine
990 * whether the removal would succeed.
992 * When called with a false quiesce argument, actually attempt the
995 * When called with a force argument, attempt to switch all TCBs to
996 * use the default stack instead of returning EBUSY.
998 * Returns 0 on success (or if the removal would succeed, or an error
1002 deregister_tcp_functions(struct tcp_function_block *blk, bool quiesce,
1005 struct tcp_function *f;
1007 if (blk == &tcp_def_funcblk) {
1008 /* You can't un-register the default */
1011 rw_wlock(&tcp_function_lock);
1012 if (blk == tcp_func_set_ptr) {
1013 /* You can't free the current default */
1014 rw_wunlock(&tcp_function_lock);
1017 /* Mark the block so no more stacks can use it. */
1018 blk->tfb_flags |= TCP_FUNC_BEING_REMOVED;
1020 * If TCBs are still attached to the stack, attempt to switch them
1021 * to the default stack.
1023 if (force && blk->tfb_refcnt) {
1026 VNET_ITERATOR_DECL(vnet_iter);
1028 rw_wunlock(&tcp_function_lock);
1031 VNET_FOREACH(vnet_iter) {
1032 CURVNET_SET(vnet_iter);
1033 INP_INFO_WLOCK(&V_tcbinfo);
1034 CK_LIST_FOREACH(inp, V_tcbinfo.ipi_listhead, inp_list) {
1036 if (inp->inp_flags & INP_TIMEWAIT) {
1040 tp = intotcpcb(inp);
1041 if (tp == NULL || tp->t_fb != blk) {
1045 tcp_switch_back_to_default(tp);
1048 INP_INFO_WUNLOCK(&V_tcbinfo);
1051 VNET_LIST_RUNLOCK();
1053 rw_wlock(&tcp_function_lock);
1055 if (blk->tfb_refcnt) {
1056 /* TCBs still attached. */
1057 rw_wunlock(&tcp_function_lock);
1062 rw_wunlock(&tcp_function_lock);
1065 /* Remove any function names that map to this function block. */
1066 while (find_tcp_fb_locked(blk, &f) != NULL) {
1067 TAILQ_REMOVE(&t_functions, f, tf_next);
1070 free(f, M_TCPFUNCTIONS);
1072 rw_wunlock(&tcp_function_lock);
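/*
 * Illustrative sketch, not part of the build: a hypothetical module unload
 * path could first probe with quiesce to see whether removal would succeed,
 * then perform the real removal, forcing any remaining connections back to
 * the default stack (tcp_example_funcblk is a made-up symbol):
 *
 *	error = deregister_tcp_functions(&tcp_example_funcblk, true, false);
 *	if (error == 0)
 *		error = deregister_tcp_functions(&tcp_example_funcblk, false, true);
 */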
1079 const char *tcbhash_tuneable;
1082 tcbhash_tuneable = "net.inet.tcp.tcbhashsize";
1085 if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN,
1086 &V_tcp_hhh[HHOOK_TCP_EST_IN], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
1087 printf("%s: WARNING: unable to register helper hook\n", __func__);
1088 if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT,
1089 &V_tcp_hhh[HHOOK_TCP_EST_OUT], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
1090 printf("%s: WARNING: unable to register helper hook\n", __func__);
1093 if (tcp_stats_init())
1094 printf("%s: WARNING: unable to initialise TCP stats\n",
1097 hashsize = TCBHASHSIZE;
1098 TUNABLE_INT_FETCH(tcbhash_tuneable, &hashsize);
1099 if (hashsize == 0) {
1101 * Auto tune the hash size based on maxsockets.
1102 * A perfect hash would have a 1:1 mapping
1103 * (hashsize = maxsockets); however, it has been
1104 * suggested that an average chain length of 2 is better.
1106 hashsize = maketcp_hashsize(maxsockets / 4);
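/*
 * For example, with maxsockets == 25600 this computes
 * maketcp_hashsize(6400), which rounds up to 8192 buckets.
 */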
1108 * Our historical default is 512,
1109 * do not autotune lower than this.
1113 if (bootverbose && IS_DEFAULT_VNET(curvnet))
1114 printf("%s: %s auto tuned to %d\n", __func__,
1115 tcbhash_tuneable, hashsize);
1118 * We require a hashsize to be a power of two.
1119 * Previously, if it was not a power of two, we would just reset it
1120 * back to 512, which could be a nasty surprise if you did not notice
1121 * the error message.
1122 * Instead, we clip it to the closest power of two lower
1123 * than the specified hash value.
1125 if (!powerof2(hashsize)) {
1126 int oldhashsize = hashsize;
1128 hashsize = maketcp_hashsize(hashsize);
1129 /* prevent absurdly low value */
1132 printf("%s: WARNING: TCB hash size not a power of 2, "
1133 "clipped from %d to %d.\n", __func__, oldhashsize,
1136 in_pcbinfo_init(&V_tcbinfo, "tcp", &V_tcb, hashsize, hashsize,
1137 "tcp_inpcb", tcp_inpcb_init, IPI_HASHFIELDS_4TUPLE);
1140 * These have to be type stable for the benefit of the timers.
1142 V_tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
1143 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1144 uma_zone_set_max(V_tcpcb_zone, maxsockets);
1145 uma_zone_set_warning(V_tcpcb_zone, "kern.ipc.maxsockets limit reached");
1151 TUNABLE_INT_FETCH("net.inet.tcp.sack.enable", &V_tcp_do_sack);
1152 V_sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
1153 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1155 tcp_fastopen_init();
1157 /* Skip initialization of globals for non-default instances. */
1158 if (!IS_DEFAULT_VNET(curvnet))
1161 tcp_reass_global_init();
1163 /* XXX virtualize those below? */
1164 tcp_delacktime = TCPTV_DELACK;
1165 tcp_keepinit = TCPTV_KEEP_INIT;
1166 tcp_keepidle = TCPTV_KEEP_IDLE;
1167 tcp_keepintvl = TCPTV_KEEPINTVL;
1168 tcp_maxpersistidle = TCPTV_KEEP_IDLE;
1169 tcp_msl = TCPTV_MSL;
1170 tcp_rexmit_initial = TCPTV_RTOBASE;
1171 if (tcp_rexmit_initial < 1)
1172 tcp_rexmit_initial = 1;
1173 tcp_rexmit_min = TCPTV_MIN;
1174 if (tcp_rexmit_min < 1)
1176 tcp_persmin = TCPTV_PERSMIN;
1177 tcp_persmax = TCPTV_PERSMAX;
1178 tcp_rexmit_slop = TCPTV_CPU_VAR;
1179 tcp_finwait2_timeout = TCPTV_FINWAIT2_TIMEOUT;
1180 tcp_tcbhashsize = hashsize;
1182 /* Setup the tcp function block list */
1183 TAILQ_INIT(&t_functions);
1184 rw_init(&tcp_function_lock, "tcp_func_lock");
1185 register_tcp_functions(&tcp_def_funcblk, M_WAITOK);
1187 /* Initialize the TCP logging data. */
1190 arc4rand(&V_ts_offset_secret, sizeof(V_ts_offset_secret), 0);
1192 if (tcp_soreceive_stream) {
1194 tcp_usrreqs.pru_soreceive = soreceive_stream;
1197 tcp6_usrreqs.pru_soreceive = soreceive_stream;
1202 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
1204 #define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
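/*
 * With these definitions TCP_MINPROTOHDR works out to 60 bytes when INET6
 * is compiled in (40-byte IPv6 header plus 20-byte TCP header) and to
 * 40 bytes (sizeof(struct tcpiphdr)) otherwise.
 */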
1206 if (max_protohdr < TCP_MINPROTOHDR)
1207 max_protohdr = TCP_MINPROTOHDR;
1208 if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
1210 #undef TCP_MINPROTOHDR
1213 EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
1214 SHUTDOWN_PRI_DEFAULT);
1215 EVENTHANDLER_REGISTER(maxsockets_change, tcp_zone_change, NULL,
1216 EVENTHANDLER_PRI_ANY);
1218 tcp_inp_lro_direct_queue = counter_u64_alloc(M_WAITOK);
1219 tcp_inp_lro_wokeup_queue = counter_u64_alloc(M_WAITOK);
1220 tcp_inp_lro_compressed = counter_u64_alloc(M_WAITOK);
1221 tcp_inp_lro_single_push = counter_u64_alloc(M_WAITOK);
1222 tcp_inp_lro_locks_taken = counter_u64_alloc(M_WAITOK);
1223 tcp_inp_lro_sack_wake = counter_u64_alloc(M_WAITOK);
1231 tcp_destroy(void *unused __unused)
1239 * All our processes are gone, and all our sockets should be cleaned
1240 * up, which means we should be past the tcp_discardcb() calls.
1241 * Sleep to let all tcpcb timers really disappear and then clean up.
1244 INP_LIST_RLOCK(&V_tcbinfo);
1245 n = V_tcbinfo.ipi_count;
1246 INP_LIST_RUNLOCK(&V_tcbinfo);
1249 pause("tcpdes", hz / 10);
1254 in_pcbinfo_destroy(&V_tcbinfo);
1255 /* tcp_discardcb() clears the sack_holes up. */
1256 uma_zdestroy(V_sack_hole_zone);
1257 uma_zdestroy(V_tcpcb_zone);
1260 * Cannot free the zone until all tcpcbs are released as we attach
1261 * the allocations to them.
1263 tcp_fastopen_destroy();
1266 error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_IN]);
1268 printf("%s: WARNING: unable to deregister helper hook "
1269 "type=%d, id=%d: error %d returned\n", __func__,
1270 HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN, error);
1272 error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_OUT]);
1274 printf("%s: WARNING: unable to deregister helper hook "
1275 "type=%d, id=%d: error %d returned\n", __func__,
1276 HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT, error);
1280 VNET_SYSUNINIT(tcp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, tcp_destroy, NULL);
1290 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
1291 * tcp_template used to store this data in mbufs, but we now recopy it out
1292 * of the tcpcb each time to conserve mbufs.
1295 tcpip_fillheaders(struct inpcb *inp, void *ip_ptr, void *tcp_ptr)
1297 struct tcphdr *th = (struct tcphdr *)tcp_ptr;
1299 INP_WLOCK_ASSERT(inp);
1302 if ((inp->inp_vflag & INP_IPV6) != 0) {
1303 struct ip6_hdr *ip6;
1305 ip6 = (struct ip6_hdr *)ip_ptr;
1306 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
1307 (inp->inp_flow & IPV6_FLOWINFO_MASK);
1308 ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
1309 (IPV6_VERSION & IPV6_VERSION_MASK);
1310 ip6->ip6_nxt = IPPROTO_TCP;
1311 ip6->ip6_plen = htons(sizeof(struct tcphdr));
1312 ip6->ip6_src = inp->in6p_laddr;
1313 ip6->ip6_dst = inp->in6p_faddr;
1316 #if defined(INET6) && defined(INET)
1323 ip = (struct ip *)ip_ptr;
1324 ip->ip_v = IPVERSION;
1326 ip->ip_tos = inp->inp_ip_tos;
1330 ip->ip_ttl = inp->inp_ip_ttl;
1332 ip->ip_p = IPPROTO_TCP;
1333 ip->ip_src = inp->inp_laddr;
1334 ip->ip_dst = inp->inp_faddr;
1337 th->th_sport = inp->inp_lport;
1338 th->th_dport = inp->inp_fport;
1346 th->th_sum = 0; /* in_pseudo() is called later for ipv4 */
1350 * Create template to be used to send tcp packets on a connection.
1351 * Allocates an mbuf and fills in a skeletal tcp/ip header. The only
1352 * use for this function is in keepalives, which use tcp_respond.
1355 tcpip_maketemplate(struct inpcb *inp)
1359 t = malloc(sizeof(*t), M_TEMP, M_NOWAIT);
1362 tcpip_fillheaders(inp, (void *)&t->tt_ipgen, (void *)&t->tt_t);
1367 * Send a single message to the TCP at address specified by
1368 * the given TCP/IP header. If m == NULL, then we make a copy
1369 * of the tcpiphdr at th and send directly to the addressed host.
1370 * This is used to force keep alive messages out using the TCP
1371 * template for a connection. If flags are given then we send
1372 * a message back to the TCP which originated the segment th,
1373 * and discard the mbuf containing it and any other attached mbufs.
1375 * In any case the ack and sequence number of the transmitted
1376 * segment are as specified by the parameters.
1378 * NOTE: If m != NULL, then th must point to *inside* the mbuf.
1381 tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
1382 tcp_seq ack, tcp_seq seq, int flags)
1391 struct ip6_hdr *ip6;
1394 int optlen, tlen, win;
1397 KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));
1401 isipv6 = ((struct ip *)ipgen)->ip_v == (IPV6_VERSION >> 4);
1408 KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
1409 INP_WLOCK_ASSERT(inp);
1416 if (!(flags & TH_RST)) {
1417 win = sbspace(&inp->inp_socket->so_rcv);
1418 if (win > TCP_MAXWIN << tp->rcv_scale)
1419 win = TCP_MAXWIN << tp->rcv_scale;
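/*
 * The advertised window field is only 16 bits wide, so the usable maximum
 * is TCP_MAXWIN (65535) shifted left by the receive window scale; with a
 * scale of 7 that caps win at roughly 8MB before it is shifted back down
 * when stored into th_win further below.
 */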
1421 if ((tp->t_flags & TF_NOOPT) == 0)
1425 m = m_gethdr(M_NOWAIT, MT_DATA);
1428 m->m_data += max_linkhdr;
1431 bcopy((caddr_t)ip6, mtod(m, caddr_t),
1432 sizeof(struct ip6_hdr));
1433 ip6 = mtod(m, struct ip6_hdr *);
1434 nth = (struct tcphdr *)(ip6 + 1);
1438 bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
1439 ip = mtod(m, struct ip *);
1440 nth = (struct tcphdr *)(ip + 1);
1442 bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
1444 } else if (!M_WRITABLE(m)) {
1447 /* Can't reuse 'm', allocate a new mbuf. */
1448 n = m_gethdr(M_NOWAIT, MT_DATA);
1454 if (!m_dup_pkthdr(n, m, M_NOWAIT)) {
1460 n->m_data += max_linkhdr;
1461 /* m_len is set later */
1462 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
1465 bcopy((caddr_t)ip6, mtod(n, caddr_t),
1466 sizeof(struct ip6_hdr));
1467 ip6 = mtod(n, struct ip6_hdr *);
1468 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
1469 nth = (struct tcphdr *)(ip6 + 1);
1473 bcopy((caddr_t)ip, mtod(n, caddr_t), sizeof(struct ip));
1474 ip = mtod(n, struct ip *);
1475 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
1476 nth = (struct tcphdr *)(ip + 1);
1478 bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
1479 xchg(nth->th_dport, nth->th_sport, uint16_t);
1486 * XXX MRT We inherit the FIB, which is lucky.
1490 m->m_data = (caddr_t)ipgen;
1491 /* m_len is set later */
1494 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
1495 nth = (struct tcphdr *)(ip6 + 1);
1499 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
1500 nth = (struct tcphdr *)(ip + 1);
1504 * this is usually a case when an extension header
1505 * exists between the IPv6 header and the
1508 nth->th_sport = th->th_sport;
1509 nth->th_dport = th->th_dport;
1511 xchg(nth->th_dport, nth->th_sport, uint16_t);
1517 tlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
1519 #if defined(INET) && defined(INET6)
1523 tlen = sizeof (struct tcpiphdr);
1527 KASSERT(M_TRAILINGSPACE(m) >= tlen,
1528 ("Not enough trailing space for message (m=%p, need=%d, have=%ld)",
1529 m, tlen, (long)M_TRAILINGSPACE(m)));
1534 /* Make sure we have room. */
1535 if (M_TRAILINGSPACE(m) < TCP_MAXOLEN) {
1536 m->m_next = m_get(M_NOWAIT, MT_DATA);
1538 optp = mtod(m->m_next, u_char *);
1543 optp = (u_char *) (nth + 1);
1549 if (tp->t_flags & TF_RCVD_TSTMP) {
1550 to.to_tsval = tcp_ts_getticks() + tp->ts_offset;
1551 to.to_tsecr = tp->ts_recent;
1552 to.to_flags |= TOF_TS;
1554 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1555 /* TCP-MD5 (RFC2385). */
1556 if (tp->t_flags & TF_SIGNATURE)
1557 to.to_flags |= TOF_SIGNATURE;
1559 /* Add the options. */
1560 tlen += optlen = tcp_addoptions(&to, optp);
1562 /* Update m_len in the correct mbuf. */
1563 optm->m_len += optlen;
1569 ip6->ip6_vfc = IPV6_VERSION;
1570 ip6->ip6_nxt = IPPROTO_TCP;
1571 ip6->ip6_plen = htons(tlen - sizeof(*ip6));
1574 #if defined(INET) && defined(INET6)
1579 ip->ip_len = htons(tlen);
1580 ip->ip_ttl = V_ip_defttl;
1581 if (V_path_mtu_discovery)
1582 ip->ip_off |= htons(IP_DF);
1585 m->m_pkthdr.len = tlen;
1586 m->m_pkthdr.rcvif = NULL;
1590 * Packet is associated with a socket, so allow the
1591 * label of the response to reflect the socket label.
1593 INP_WLOCK_ASSERT(inp);
1594 mac_inpcb_create_mbuf(inp, m);
1597 * Packet is not associated with a socket, so possibly
1598 * update the label in place.
1600 mac_netinet_tcp_reply(m);
1603 nth->th_seq = htonl(seq);
1604 nth->th_ack = htonl(ack);
1606 nth->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
1607 nth->th_flags = flags;
1609 nth->th_win = htons((u_short) (win >> tp->rcv_scale));
1611 nth->th_win = htons((u_short)win);
1614 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1615 if (to.to_flags & TOF_SIGNATURE) {
1616 if (!TCPMD5_ENABLED() ||
1617 TCPMD5_OUTPUT(m, nth, to.to_signature) != 0) {
1624 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
1627 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
1628 nth->th_sum = in6_cksum_pseudo(ip6,
1629 tlen - sizeof(struct ip6_hdr), IPPROTO_TCP, 0);
1630 ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb :
1634 #if defined(INET6) && defined(INET)
1639 m->m_pkthdr.csum_flags = CSUM_TCP;
1640 nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1641 htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
1645 if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
1646 tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
1648 TCP_PROBE3(debug__output, tp, th, m);
1650 TCP_PROBE5(accept__refused, NULL, NULL, m, tp, nth);
1654 TCP_PROBE5(send, NULL, tp, ip6, tp, nth);
1655 (void)ip6_output(m, NULL, NULL, 0, NULL, NULL, inp);
1658 #if defined(INET) && defined(INET6)
1663 TCP_PROBE5(send, NULL, tp, ip, tp, nth);
1664 (void)ip_output(m, NULL, NULL, 0, NULL, inp);
1670 * Create a new TCP control block, making an
1671 * empty reassembly queue and hooking it to the argument
1672 * protocol control block. The `inp' parameter must have
1673 * come from the zone allocator set up in tcp_init().
1676 tcp_newtcpcb(struct inpcb *inp)
1678 struct tcpcb_mem *tm;
1681 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
1684 tm = uma_zalloc(V_tcpcb_zone, M_NOWAIT | M_ZERO);
1689 /* Initialise cc_var struct for this tcpcb. */
1691 tp->ccv->type = IPPROTO_TCP;
1692 tp->ccv->ccvc.tcp = tp;
1693 rw_rlock(&tcp_function_lock);
1694 tp->t_fb = tcp_func_set_ptr;
1695 refcount_acquire(&tp->t_fb->tfb_refcnt);
1696 rw_runlock(&tcp_function_lock);
1698 * Use the current system default CC algorithm.
1701 KASSERT(!STAILQ_EMPTY(&cc_list), ("cc_list is empty!"));
1702 CC_ALGO(tp) = CC_DEFAULT();
1705 * The tcpcb will hold a reference on its inpcb until tcp_discardcb()
1708 in_pcbref(inp); /* Reference for tcpcb */
1711 if (CC_ALGO(tp)->cb_init != NULL)
1712 if (CC_ALGO(tp)->cb_init(tp->ccv) > 0) {
1713 if (tp->t_fb->tfb_tcp_fb_fini)
1714 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
1715 in_pcbrele_wlocked(inp);
1716 refcount_release(&tp->t_fb->tfb_refcnt);
1717 uma_zfree(V_tcpcb_zone, tm);
1723 if (khelp_init_osd(HELPER_CLASS_TCP, tp->osd)) {
1724 if (tp->t_fb->tfb_tcp_fb_fini)
1725 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
1726 in_pcbrele_wlocked(inp);
1727 refcount_release(&tp->t_fb->tfb_refcnt);
1728 uma_zfree(V_tcpcb_zone, tm);
1734 tp->t_vnet = inp->inp_vnet;
1736 tp->t_timers = &tm->tt;
1737 TAILQ_INIT(&tp->t_segq);
1740 isipv6 ? V_tcp_v6mssdflt :
1744 /* Set up our timeouts. */
1745 callout_init(&tp->t_timers->tt_rexmt, 1);
1746 callout_init(&tp->t_timers->tt_persist, 1);
1747 callout_init(&tp->t_timers->tt_keep, 1);
1748 callout_init(&tp->t_timers->tt_2msl, 1);
1749 callout_init(&tp->t_timers->tt_delack, 1);
1751 if (V_tcp_do_rfc1323)
1752 tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
1754 tp->t_flags |= TF_SACK_PERMIT;
1755 TAILQ_INIT(&tp->snd_holes);
1758 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
1759 * rtt estimate. Set rttvar so that srtt + 4 * rttvar gives
1760 * a reasonable initial retransmit time.
1762 tp->t_srtt = TCPTV_SRTTBASE;
1763 tp->t_rttvar = ((tcp_rexmit_initial - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
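/*
 * Since TCPTV_SRTTBASE is 0, the value stored above is the scaled
 * equivalent of roughly tcp_rexmit_initial / 4, so that the estimate
 * srtt + 4 * rttvar works out to about tcp_rexmit_initial.
 */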
1764 tp->t_rttmin = tcp_rexmit_min;
1765 tp->t_rxtcur = tcp_rexmit_initial;
1766 tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1767 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1768 tp->t_rcvtime = ticks;
1770 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
1771 * because the socket may be bound to an IPv6 wildcard address,
1772 * which may match an IPv4-mapped IPv6 address.
1774 inp->inp_ip_ttl = V_ip_defttl;
1778 * Init the TCP PCAP queues.
1780 tcp_pcap_tcpcb_init(tp);
1783 /* Initialize the per-TCPCB log data. */
1784 tcp_log_tcpcbinit(tp);
1786 tp->t_pacing_rate = -1;
1787 if (tp->t_fb->tfb_tcp_fb_init) {
1788 if ((*tp->t_fb->tfb_tcp_fb_init)(tp)) {
1789 refcount_release(&tp->t_fb->tfb_refcnt);
1790 in_pcbrele_wlocked(inp);
1791 uma_zfree(V_tcpcb_zone, tm);
1796 if (V_tcp_perconn_stats_enable == 1)
1797 tp->t_stats = stats_blob_alloc(V_tcp_perconn_stats_dflt_tpl, 0);
1799 return (tp); /* XXX */
1803 * Switch the congestion control algorithm back to NewReno for any active
1804 * control blocks using an algorithm which is about to go away.
1805 * This ensures the CC framework can allow the unload to proceed without leaving
1806 * any dangling pointers which would trigger a panic.
1807 * Returning non-zero would inform the CC framework that something went wrong
1808 * and it would be unsafe to allow the unload to proceed. However, there is no
1809 * way for this to occur with this implementation so we always return zero.
1812 tcp_ccalgounload(struct cc_algo *unload_algo)
1814 struct cc_algo *tmpalgo;
1817 VNET_ITERATOR_DECL(vnet_iter);
1820 * Check all active control blocks across all network stacks and change
1821 * any that are using "unload_algo" back to NewReno. If "unload_algo"
1822 * requires cleanup code to be run, call it.
1825 VNET_FOREACH(vnet_iter) {
1826 CURVNET_SET(vnet_iter);
1827 INP_INFO_WLOCK(&V_tcbinfo);
1829 * New connections already part way through being initialised
1830 * with the CC algo we're removing will not race with this code
1831 * because the INP_INFO_WLOCK is held during initialisation. We
1832 * therefore don't enter the loop below until the connection
1833 * list has stabilised.
1835 CK_LIST_FOREACH(inp, &V_tcb, inp_list) {
1837 /* Important to skip tcptw structs. */
1838 if (!(inp->inp_flags & INP_TIMEWAIT) &&
1839 (tp = intotcpcb(inp)) != NULL) {
1841 * By holding INP_WLOCK here, we are assured
1842 * that the connection is not currently
1843 * executing inside the CC module's functions
1844 * i.e. it is safe to make the switch back to
1847 if (CC_ALGO(tp) == unload_algo) {
1848 tmpalgo = CC_ALGO(tp);
1849 if (tmpalgo->cb_destroy != NULL)
1850 tmpalgo->cb_destroy(tp->ccv);
1853 * NewReno may allocate memory on
1854 * demand for certain stateful
1855 * configuration as needed, but is
1856 * coded to never fail on memory
1857 * allocation failure so it is a safe
1860 CC_ALGO(tp) = &newreno_cc_algo;
1865 INP_INFO_WUNLOCK(&V_tcbinfo);
1868 VNET_LIST_RUNLOCK();
1874 * Drop a TCP connection, reporting
1875 * the specified error. If connection is synchronized,
1876 * then send a RST to peer.
1879 tcp_drop(struct tcpcb *tp, int errno)
1881 struct socket *so = tp->t_inpcb->inp_socket;
1884 INP_INFO_LOCK_ASSERT(&V_tcbinfo);
1885 INP_WLOCK_ASSERT(tp->t_inpcb);
1887 if (TCPS_HAVERCVDSYN(tp->t_state)) {
1888 tcp_state_change(tp, TCPS_CLOSED);
1889 (void) tp->t_fb->tfb_tcp_output(tp);
1890 TCPSTAT_INC(tcps_drops);
1892 TCPSTAT_INC(tcps_conndrops);
1893 if (errno == ETIMEDOUT && tp->t_softerror)
1894 errno = tp->t_softerror;
1895 so->so_error = errno;
1896 return (tcp_close(tp));
1900 tcp_discardcb(struct tcpcb *tp)
1902 struct inpcb *inp = tp->t_inpcb;
1903 struct socket *so = inp->inp_socket;
1905 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
1907 int released __unused;
1909 INP_WLOCK_ASSERT(inp);
1912 * Make sure that all of our timers are stopped before we delete the
1915 * If stopping a timer fails, we schedule a discard function in the same
1916 * callout, and the last discard function called will take care of
1917 * deleting the tcpcb.
1919 tp->t_timers->tt_draincnt = 0;
1920 tcp_timer_stop(tp, TT_REXMT);
1921 tcp_timer_stop(tp, TT_PERSIST);
1922 tcp_timer_stop(tp, TT_KEEP);
1923 tcp_timer_stop(tp, TT_2MSL);
1924 tcp_timer_stop(tp, TT_DELACK);
1925 if (tp->t_fb->tfb_tcp_timer_stop_all) {
1927 * Call the stack's stop-all function;
1928 * it should call the tcp_timer_stop()
1929 * method with each of the function-specific timeouts.
1930 * Each stop will be dispatched via tfb_tcp_timer_stop(),
1931 * which should use the async drain function of the
1932 * callout system (see tcp_var.h).
1934 tp->t_fb->tfb_tcp_timer_stop_all(tp);
1938 * If we got enough samples through the srtt filter,
1939 * save the rtt and rttvar in the routing entry.
1940 * 'Enough' is arbitrarily defined as 4 rtt samples.
1941 * Four samples are enough for the srtt filter to converge
1942 * to reasonably close to the correct value; fewer samples
1943 * and we could save a bogus rtt. The danger is not high
1944 * as tcp quickly recovers from everything.
1945 * XXX: Works very well but needs some more statistics!
1947 if (tp->t_rttupdated >= 4) {
1948 struct hc_metrics_lite metrics;
1951 bzero(&metrics, sizeof(metrics));
1953 * Always update the ssthresh when the conditions below
1954 * are satisfied. This gives us a better starting value
1955 * for congestion avoidance on new connections.
1956 * ssthresh is only set if packet loss occurred on a session.
1958 * XXXRW: 'so' may be NULL here, and/or socket buffer may be
1959 * being torn down. Ideally this code would not use 'so'.
1961 ssthresh = tp->snd_ssthresh;
1962 if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
1964 * convert the limit from user data bytes to
1965 * packets then to packet data bytes.
1967 ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
1970 ssthresh *= (tp->t_maxseg +
1972 (isipv6 ? sizeof (struct ip6_hdr) +
1973 sizeof (struct tcphdr) :
1975 sizeof (struct tcpiphdr)
1982 metrics.rmx_ssthresh = ssthresh;
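/*
 * For example, with t_maxseg == 1460 and an ssthresh of 10000 user-data
 * bytes, the rounding above yields 7 segments, which becomes
 * 7 * (1460 + 40) == 10500 bytes of packet data on an IPv4 connection.
 */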
1984 metrics.rmx_rtt = tp->t_srtt;
1985 metrics.rmx_rttvar = tp->t_rttvar;
1986 metrics.rmx_cwnd = tp->snd_cwnd;
1987 metrics.rmx_sendpipe = 0;
1988 metrics.rmx_recvpipe = 0;
1990 tcp_hc_update(&inp->inp_inc, &metrics);
1993 /* free the reassembly queue, if any */
1994 tcp_reass_flush(tp);
1997 /* Disconnect offload device, if any. */
1998 if (tp->t_flags & TF_TOE)
1999 tcp_offload_detach(tp);
2002 tcp_free_sackholes(tp);
2005 /* Free the TCP PCAP queues. */
2006 tcp_pcap_drain(&(tp->t_inpkts));
2007 tcp_pcap_drain(&(tp->t_outpkts));
2010 /* Allow the CC algorithm to clean up after itself. */
2011 if (CC_ALGO(tp)->cb_destroy != NULL)
2012 CC_ALGO(tp)->cb_destroy(tp->ccv);
2016 khelp_destroy_osd(tp->osd);
2019 stats_blob_destroy(tp->t_stats);
2023 inp->inp_ppcb = NULL;
2024 if (tp->t_timers->tt_draincnt == 0) {
2025 /* We own the last reference on tcpcb, let's free it. */
2027 tcp_log_tcpcbfini(tp);
2029 TCPSTATES_DEC(tp->t_state);
2030 if (tp->t_fb->tfb_tcp_fb_fini)
2031 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
2032 refcount_release(&tp->t_fb->tfb_refcnt);
2034 uma_zfree(V_tcpcb_zone, tp);
2035 released = in_pcbrele_wlocked(inp);
2036 KASSERT(!released, ("%s: inp %p should not have been released "
2037 "here", __func__, inp));
2042 tcp_timer_discard(void *ptp)
2046 struct epoch_tracker et;
2048 tp = (struct tcpcb *)ptp;
2049 CURVNET_SET(tp->t_vnet);
2050 NET_EPOCH_ENTER(et);
2052 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL",
2055 KASSERT((tp->t_timers->tt_flags & TT_STOPPED) != 0,
2056 ("%s: tcpcb has to be stopped here", __func__));
2057 tp->t_timers->tt_draincnt--;
2058 if (tp->t_timers->tt_draincnt == 0) {
2059 /* We own the last reference on this tcpcb, let's free it. */
2061 tcp_log_tcpcbfini(tp);
2063 TCPSTATES_DEC(tp->t_state);
2064 if (tp->t_fb->tfb_tcp_fb_fini)
2065 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
2066 refcount_release(&tp->t_fb->tfb_refcnt);
2068 uma_zfree(V_tcpcb_zone, tp);
2069 if (in_pcbrele_wlocked(inp)) {
2081 * Attempt to close a TCP control block, marking it as dropped, and freeing
2082 * the socket if we hold the only reference.
2085 tcp_close(struct tcpcb *tp)
2087 struct inpcb *inp = tp->t_inpcb;
2090 INP_INFO_LOCK_ASSERT(&V_tcbinfo);
2091 INP_WLOCK_ASSERT(inp);
2094 if (tp->t_state == TCPS_LISTEN)
2095 tcp_offload_listen_stop(tp);
2098 * This releases the TFO pending counter resource for TFO listen
2099 * sockets as well as passively-created TFO sockets that transition
2100 * from SYN_RECEIVED to CLOSED.
2102 if (tp->t_tfo_pending) {
2103 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
2104 tp->t_tfo_pending = NULL;
2107 TCPSTAT_INC(tcps_closed);
2108 if (tp->t_state != TCPS_CLOSED)
2109 tcp_state_change(tp, TCPS_CLOSED);
2110 KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL"));
2111 so = inp->inp_socket;
2112 soisdisconnected(so);
2113 if (inp->inp_flags & INP_SOCKREF) {
2114 KASSERT(so->so_state & SS_PROTOREF,
2115 ("tcp_close: !SS_PROTOREF"));
2116 inp->inp_flags &= ~INP_SOCKREF;
2119 so->so_state &= ~SS_PROTOREF;
2129 VNET_ITERATOR_DECL(vnet_iter);
2134 VNET_LIST_RLOCK_NOSLEEP();
2135 VNET_FOREACH(vnet_iter) {
2136 CURVNET_SET(vnet_iter);
2141 * Walk the tcpcbs, if any exist, and flush the reassembly queue,
2142 * if there is one...
2143 * XXX: The "Net/3" implementation doesn't imply that the TCP
2144 * reassembly queue should be flushed, but in a situation
2145 * where we're really low on mbufs, this is potentially
2148 INP_INFO_WLOCK(&V_tcbinfo);
2149 CK_LIST_FOREACH(inpb, V_tcbinfo.ipi_listhead, inp_list) {
2151 if (inpb->inp_flags & INP_TIMEWAIT) {
2155 if ((tcpb = intotcpcb(inpb)) != NULL) {
2156 tcp_reass_flush(tcpb);
2157 tcp_clean_sackreport(tcpb);
2159 tcp_log_drain(tcpb);
2162 if (tcp_pcap_aggressive_free) {
2163 /* Free the TCP PCAP queues. */
2164 tcp_pcap_drain(&(tcpb->t_inpkts));
2165 tcp_pcap_drain(&(tcpb->t_outpkts));
2171 INP_INFO_WUNLOCK(&V_tcbinfo);
2174 VNET_LIST_RUNLOCK_NOSLEEP();
2178 * Notify a tcp user of an asynchronous error;
2179 * store the error as a soft error, but do not wake up the user
2180 * (for now, nothing can select for a soft error anyway).
2182 * Do not wake up the user since there currently is no mechanism for
2183 * reporting soft errors (yet - a kqueue filter may be added).
2185 static struct inpcb *
2186 tcp_notify(struct inpcb *inp, int error)
2190 INP_INFO_LOCK_ASSERT(&V_tcbinfo);
2191 INP_WLOCK_ASSERT(inp);
2193 if ((inp->inp_flags & INP_TIMEWAIT) ||
2194 (inp->inp_flags & INP_DROPPED))
2197 tp = intotcpcb(inp);
2198 KASSERT(tp != NULL, ("tcp_notify: tp == NULL"));
2201 * Ignore some errors if we are hooked up.
2202 * If connection hasn't completed, has retransmitted several times,
2203 * and receives a second error, give up now. This is better
2204 * than waiting a long time to establish a connection that
2205 * can never complete.
2207 if (tp->t_state == TCPS_ESTABLISHED &&
2208 (error == EHOSTUNREACH || error == ENETUNREACH ||
2209 error == EHOSTDOWN)) {
2210 if (inp->inp_route.ro_nh) {
2211 NH_FREE(inp->inp_route.ro_nh);
2212 inp->inp_route.ro_nh = (struct nhop_object *)NULL;
2215 } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
2217 tp = tcp_drop(tp, error);
2223 tp->t_softerror = error;
2227 wakeup( &so->so_timeo);
2234 tcp_pcblist(SYSCTL_HANDLER_ARGS)
2236 struct epoch_tracker et;
2241 if (req->newptr != NULL)
2244 if (req->oldptr == NULL) {
2247 n = V_tcbinfo.ipi_count +
2248 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
2249 n += imax(n / 8, 10);
2250 req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xtcpcb);
2254 if ((error = sysctl_wire_old_buffer(req, 0)) != 0)
2257 bzero(&xig, sizeof(xig));
2258 xig.xig_len = sizeof xig;
2259 xig.xig_count = V_tcbinfo.ipi_count +
2260 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
2261 xig.xig_gen = V_tcbinfo.ipi_gencnt;
2262 xig.xig_sogen = so_gencnt;
2263 error = SYSCTL_OUT(req, &xig, sizeof xig);
2267 error = syncache_pcblist(req);
2271 NET_EPOCH_ENTER(et);
2272 for (inp = CK_LIST_FIRST(V_tcbinfo.ipi_listhead);
2274 inp = CK_LIST_NEXT(inp, inp_list)) {
2276 if (inp->inp_gencnt <= xig.xig_gen) {
2280 * XXX: This use of cr_cansee(), introduced with
2281 * TCP state changes, is not quite right, but for
2282 * now, better than nothing.
2284 if (inp->inp_flags & INP_TIMEWAIT) {
2285 if (intotw(inp) != NULL)
2286 crerr = cr_cansee(req->td->td_ucred,
2287 intotw(inp)->tw_cred);
2289 crerr = EINVAL; /* Skip this inp. */
2291 crerr = cr_canseeinpcb(req->td->td_ucred, inp);
2295 tcp_inptoxtp(inp, &xt);
2297 error = SYSCTL_OUT(req, &xt, sizeof xt);
2310 * Give the user an updated idea of our state.
2311 * If the generation differs from what we told
2312 * her before, she knows that something happened
2313 * while we were processing this request, and it
2314 * might be necessary to retry.
2316 xig.xig_gen = V_tcbinfo.ipi_gencnt;
2317 xig.xig_sogen = so_gencnt;
2318 xig.xig_count = V_tcbinfo.ipi_count +
2319 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
2320 error = SYSCTL_OUT(req, &xig, sizeof xig);
2326 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist,
2327 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2328 NULL, 0, tcp_pcblist, "S,xtcpcb",
2329 "List of active TCP connections");
2333 tcp_getcred(SYSCTL_HANDLER_ARGS)
2336 struct sockaddr_in addrs[2];
2337 struct epoch_tracker et;
2341 error = priv_check(req->td, PRIV_NETINET_GETCRED);
2344 error = SYSCTL_IN(req, addrs, sizeof(addrs));
2347 NET_EPOCH_ENTER(et);
2348 inp = in_pcblookup(&V_tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
2349 addrs[0].sin_addr, addrs[0].sin_port, INPLOOKUP_RLOCKPCB, NULL);
2352 if (inp->inp_socket == NULL)
2355 error = cr_canseeinpcb(req->td->td_ucred, inp);
2357 cru2x(inp->inp_cred, &xuc);
2362 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
2366 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
2367 CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_NEEDGIANT,
2368 0, 0, tcp_getcred, "S,xucred",
2369 "Get the xucred of a TCP connection");
2374 tcp6_getcred(SYSCTL_HANDLER_ARGS)
2376 struct epoch_tracker et;
2378 struct sockaddr_in6 addrs[2];
2385 error = priv_check(req->td, PRIV_NETINET_GETCRED);
2388 error = SYSCTL_IN(req, addrs, sizeof(addrs));
2391 if ((error = sa6_embedscope(&addrs[0], V_ip6_use_defzone)) != 0 ||
2392 (error = sa6_embedscope(&addrs[1], V_ip6_use_defzone)) != 0) {
2395 if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
2397 if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
2404 NET_EPOCH_ENTER(et);
2407 inp = in_pcblookup(&V_tcbinfo,
2408 *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
2410 *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
2411 addrs[0].sin6_port, INPLOOKUP_RLOCKPCB, NULL);
2414 inp = in6_pcblookup(&V_tcbinfo,
2415 &addrs[1].sin6_addr, addrs[1].sin6_port,
2416 &addrs[0].sin6_addr, addrs[0].sin6_port,
2417 INPLOOKUP_RLOCKPCB, NULL);
2420 if (inp->inp_socket == NULL)
2423 error = cr_canseeinpcb(req->td->td_ucred, inp);
2425 cru2x(inp->inp_cred, &xuc);
2430 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
2434 SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
2435 CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_NEEDGIANT,
2436 0, 0, tcp6_getcred, "S,xucred",
2437 "Get the xucred of a TCP6 connection");
2442 tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
2444 struct ip *ip = vip;
2446 struct in_addr faddr;
2449 struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
2451 struct in_conninfo inc;
2452 tcp_seq icmp_tcp_seq;
2455 faddr = ((struct sockaddr_in *)sa)->sin_addr;
2456 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
2459 if (cmd == PRC_MSGSIZE)
2460 notify = tcp_mtudisc_notify;
2461 else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
2462 cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
2463 cmd == PRC_TIMXCEED_INTRANS) && ip)
2464 notify = tcp_drop_syn_sent;
2467 * Hostdead is ugly because it goes linearly through all PCBs.
2468 * XXX: We never get this from ICMP, otherwise it would make an
2469 * excellent DoS attack on machines with many connections.
2471 else if (cmd == PRC_HOSTDEAD)
2473 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
2477 in_pcbnotifyall(&V_tcbinfo, faddr, inetctlerrmap[cmd], notify);
2481 icp = (struct icmp *)((caddr_t)ip - offsetof(struct icmp, icmp_ip));
2482 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
2483 inp = in_pcblookup(&V_tcbinfo, faddr, th->th_dport, ip->ip_src,
2484 th->th_sport, INPLOOKUP_WLOCKPCB, NULL);
2485 if (inp != NULL && PRC_IS_REDIRECT(cmd)) {
2486 /* signal EHOSTDOWN, as it flushes the cached route */
2487 inp = (*notify)(inp, EHOSTDOWN);
2490 icmp_tcp_seq = th->th_seq;
2492 if (!(inp->inp_flags & INP_TIMEWAIT) &&
2493 !(inp->inp_flags & INP_DROPPED) &&
2494 !(inp->inp_socket == NULL)) {
2495 tp = intotcpcb(inp);
2496 if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) &&
2497 SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) {
2498 if (cmd == PRC_MSGSIZE) {
2501 * If we got a needfrag set the MTU
2502 * in the route to the suggested new
2503 * value (if given) and then notify.
2505 mtu = ntohs(icp->icmp_nextmtu);
2507 * If no alternative MTU was
2508 * proposed, try the next smaller
2513 ntohs(ip->ip_len), 1);
2514 if (mtu < V_tcp_minmss +
2515 sizeof(struct tcpiphdr))
2516 mtu = V_tcp_minmss +
2517 sizeof(struct tcpiphdr);
2519 * Only process the offered MTU if it
2520 * is smaller than the current one.
2522 if (mtu < tp->t_maxseg +
2523 sizeof(struct tcpiphdr)) {
2524 bzero(&inc, sizeof(inc));
2525 inc.inc_faddr = faddr;
2527 inp->inp_inc.inc_fibnum;
2528 tcp_hc_updatemtu(&inc, mtu);
2529 tcp_mtudisc(inp, mtu);
2532 inp = (*notify)(inp,
2533 inetctlerrmap[cmd]);
2537 bzero(&inc, sizeof(inc));
2538 inc.inc_fport = th->th_dport;
2539 inc.inc_lport = th->th_sport;
2540 inc.inc_faddr = faddr;
2541 inc.inc_laddr = ip->ip_src;
2542 syncache_unreach(&inc, icmp_tcp_seq);
2552 tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
2554 struct in6_addr *dst;
2555 struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
2556 struct ip6_hdr *ip6;
2560 struct icmp6_hdr *icmp6;
2561 struct ip6ctlparam *ip6cp = NULL;
2562 const struct sockaddr_in6 *sa6_src = NULL;
2563 struct in_conninfo inc;
2568 tcp_seq icmp_tcp_seq;
2572 if (sa->sa_family != AF_INET6 ||
2573 sa->sa_len != sizeof(struct sockaddr_in6))
2576 /* if the parameter is from icmp6, decode it. */
2578 ip6cp = (struct ip6ctlparam *)d;
2579 icmp6 = ip6cp->ip6c_icmp6;
2581 ip6 = ip6cp->ip6c_ip6;
2582 off = ip6cp->ip6c_off;
2583 sa6_src = ip6cp->ip6c_src;
2584 dst = ip6cp->ip6c_finaldst;
2588 off = 0; /* fool gcc */
2593 if (cmd == PRC_MSGSIZE)
2594 notify = tcp_mtudisc_notify;
2595 else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
2596 cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
2597 cmd == PRC_TIMXCEED_INTRANS) && ip6 != NULL)
2598 notify = tcp_drop_syn_sent;
2601 * Hostdead is ugly because it goes linearly through all PCBs.
2602 * XXX: We never get this from ICMP, otherwise it would make an
2603 * excellent DoS attack on machines with many connections.
2605 else if (cmd == PRC_HOSTDEAD)
2607 else if ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0)
2611 in6_pcbnotify(&V_tcbinfo, sa, 0,
2612 (const struct sockaddr *)sa6_src,
2613 0, cmd, NULL, notify);
2617 /* Check if we can safely get the ports from the tcp hdr */
2620 (int32_t) (off + sizeof(struct tcp_ports)))) {
2623 bzero(&t_ports, sizeof(struct tcp_ports));
2624 m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports);
2625 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_dst, t_ports.th_dport,
2626 &ip6->ip6_src, t_ports.th_sport, INPLOOKUP_WLOCKPCB, NULL);
2627 if (inp != NULL && PRC_IS_REDIRECT(cmd)) {
2628 /* signal EHOSTDOWN, as it flushes the cached route */
2629 inp = (*notify)(inp, EHOSTDOWN);
2632 off += sizeof(struct tcp_ports);
2633 if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) {
2636 m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq);
2638 if (!(inp->inp_flags & INP_TIMEWAIT) &&
2639 !(inp->inp_flags & INP_DROPPED) &&
2640 !(inp->inp_socket == NULL)) {
2641 tp = intotcpcb(inp);
2642 if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) &&
2643 SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) {
2644 if (cmd == PRC_MSGSIZE) {
2647 * If we got a needfrag set the MTU
2648 * in the route to the suggested new
2649 * value (if given) and then notify.
2651 mtu = ntohl(icmp6->icmp6_mtu);
2653 * If no alternative MTU was
2654 * proposed, or the proposed
2655 * MTU was too small, set to
2658 if (mtu < IPV6_MMTU)
2659 mtu = IPV6_MMTU - 8;
2660 bzero(&inc, sizeof(inc));
2661 inc.inc_fibnum = M_GETFIB(m);
2662 inc.inc_flags |= INC_ISIPV6;
2663 inc.inc6_faddr = *dst;
2664 if (in6_setscope(&inc.inc6_faddr,
2665 m->m_pkthdr.rcvif, NULL))
2668 * Only process the offered MTU if it
2669 * is smaller than the current one.
2671 if (mtu < tp->t_maxseg +
2672 sizeof (struct tcphdr) +
2673 sizeof (struct ip6_hdr)) {
2674 tcp_hc_updatemtu(&inc, mtu);
2675 tcp_mtudisc(inp, mtu);
2676 ICMP6STAT_INC(icp6s_pmtuchg);
2679 inp = (*notify)(inp,
2680 inet6ctlerrmap[cmd]);
2684 bzero(&inc, sizeof(inc));
2685 inc.inc_fibnum = M_GETFIB(m);
2686 inc.inc_flags |= INC_ISIPV6;
2687 inc.inc_fport = t_ports.th_dport;
2688 inc.inc_lport = t_ports.th_sport;
2689 inc.inc6_faddr = *dst;
2690 inc.inc6_laddr = ip6->ip6_src;
2691 syncache_unreach(&inc, icmp_tcp_seq);
2700 tcp_keyed_hash(struct in_conninfo *inc, u_char *key, u_int len)
2705 KASSERT(len >= SIPHASH_KEY_LENGTH,
2706 ("%s: keylen %u too short", __func__, len));
2707 SipHash24_Init(&ctx);
2708 SipHash_SetKey(&ctx, (uint8_t *)key);
2709 SipHash_Update(&ctx, &inc->inc_fport, sizeof(uint16_t));
2710 SipHash_Update(&ctx, &inc->inc_lport, sizeof(uint16_t));
2711 switch (inc->inc_flags & INC_ISIPV6) {
2714 SipHash_Update(&ctx, &inc->inc_faddr, sizeof(struct in_addr));
2715 SipHash_Update(&ctx, &inc->inc_laddr, sizeof(struct in_addr));
2720 SipHash_Update(&ctx, &inc->inc6_faddr, sizeof(struct in6_addr));
2721 SipHash_Update(&ctx, &inc->inc6_laddr, sizeof(struct in6_addr));
2725 SipHash_Final((uint8_t *)hash, &ctx);
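/* Fold the 64-bit SipHash digest down to the 32 bits callers expect. */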
2727 return (hash[0] ^ hash[1]);
2731 tcp_new_ts_offset(struct in_conninfo *inc)
2733 struct in_conninfo inc_store, *local_inc;
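/*
 * With per-connection offsets disabled, the ports are zeroed below so the
 * keyed hash (and thus the timestamp offset) depends only on the address
 * pair, i.e. every connection between the same two hosts shares one offset.
 */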
2735 if (!V_tcp_ts_offset_per_conn) {
2736 memcpy(&inc_store, inc, sizeof(struct in_conninfo));
2737 inc_store.inc_lport = 0;
2738 inc_store.inc_fport = 0;
2739 local_inc = &inc_store;
2743 return (tcp_keyed_hash(local_inc, V_ts_offset_secret,
2744 sizeof(V_ts_offset_secret)));
2748 * Following is where TCP initial sequence number generation occurs.
2750 * There are two places where we must use initial sequence numbers:
2751 * 1. In SYN-ACK packets.
2752 * 2. In SYN packets.
2754 * All ISNs for SYN-ACK packets are generated by the syncache. See
2755 * tcp_syncache.c for details.
2757 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
2758 * depends on this property. In addition, these ISNs should be
2759 * unguessable so as to prevent connection hijacking. To satisfy
2760 * the requirements of this situation, the algorithm outlined in
2761 * RFC 1948 is used, with only small modifications.
2763 * Implementation details:
2765 * Time is based on the system timer, and is corrected so that it
2766 * increases by one megabyte per second. This allows for proper
2767 * recycling on high speed LANs while still leaving over an hour
2770 * As reading the *exact* system time is too expensive to be done
2771 * whenever setting up a TCP connection, we increment the time
2772 * offset in two ways. First, a small random positive increment
2773 * is added to isn_offset for each connection that is set up.
2774 * Second, the function tcp_isn_tick fires once per clock tick
2775 * and increments isn_offset as necessary so that sequence numbers
2776 * are incremented at approximately ISN_BYTES_PER_SECOND. The
2777 * random positive increments serve only to ensure that the same
2778 * exact sequence number is never sent out twice (as could otherwise
2779 * happen when a port is recycled in less than the system tick
2782 * net.inet.tcp.isn_reseed_interval controls the number of seconds
2783 * between seeding of isn_secret. This is normally set to zero,
2784 * as reseeding should not be necessary.
2786 * Locking of the global variables isn_secret, isn_last_reseed, isn_offset,
2787 * isn_offset_old, and isn_ctx is performed using the ISN lock. In
2788 * general, this means holding an exclusive (write) lock.
2791 #define ISN_BYTES_PER_SECOND 1048576
2792 #define ISN_STATIC_INCREMENT 4096
2793 #define ISN_RANDOM_INCREMENT (4096 - 1)
2794 #define ISN_SECRET_LENGTH SIPHASH_KEY_LENGTH
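/*
 * Illustrative sketch, not part of this file: the arithmetic behind the
 * constants above.  EX_HZ is an assumption (kern.hz commonly defaults to
 * 1000); EX_BYTES_PER_SECOND mirrors ISN_BYTES_PER_SECOND.  At that rate
 * the 32-bit ISN space wraps after about 4096 seconds, the "over an hour"
 * mentioned in the comment above.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_HZ			1000		/* assumed tick rate */
#define EX_BYTES_PER_SECOND	1048576		/* mirrors ISN_BYTES_PER_SECOND */

int
main(void)
{
	/* Per-tick advance applied by the projected-offset catch-up. */
	uint32_t per_tick = EX_BYTES_PER_SECOND / EX_HZ;	/* 1048 */
	/* Seconds until the 32-bit sequence space wraps at that rate. */
	double wrap = 4294967296.0 / EX_BYTES_PER_SECOND;	/* 4096 s */

	printf("offset advance per tick: %u\n", per_tick);
	printf("sequence space wraps after ~%.0f s (~%.1f min)\n",
	    wrap, wrap / 60.0);
	return (0);
}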
2796 VNET_DEFINE_STATIC(u_char, isn_secret[ISN_SECRET_LENGTH]);
2797 VNET_DEFINE_STATIC(int, isn_last);
2798 VNET_DEFINE_STATIC(int, isn_last_reseed);
2799 VNET_DEFINE_STATIC(u_int32_t, isn_offset);
2800 VNET_DEFINE_STATIC(u_int32_t, isn_offset_old);
2802 #define V_isn_secret VNET(isn_secret)
2803 #define V_isn_last VNET(isn_last)
2804 #define V_isn_last_reseed VNET(isn_last_reseed)
2805 #define V_isn_offset VNET(isn_offset)
2806 #define V_isn_offset_old VNET(isn_offset_old)
2809 tcp_new_isn(struct in_conninfo *inc)
2812 u_int32_t projected_offset;
2815 /* Seed if this is the first use, reseed if requested. */
2816 if ((V_isn_last_reseed == 0) || ((V_tcp_isn_reseed_interval > 0) &&
2817 (((u_int)V_isn_last_reseed + (u_int)V_tcp_isn_reseed_interval*hz)
2819 arc4rand(&V_isn_secret, sizeof(V_isn_secret), 0);
2820 V_isn_last_reseed = ticks;
2823 /* Compute the hash and return the ISN. */
2824 new_isn = (tcp_seq)tcp_keyed_hash(inc, V_isn_secret,
2825 sizeof(V_isn_secret));
2826 V_isn_offset += ISN_STATIC_INCREMENT +
2827 (arc4random() & ISN_RANDOM_INCREMENT);
2828 if (ticks != V_isn_last) {
2829 projected_offset = V_isn_offset_old +
2830 ISN_BYTES_PER_SECOND / hz * (ticks - V_isn_last);
2831 if (SEQ_GT(projected_offset, V_isn_offset))
2832 V_isn_offset = projected_offset;
2833 V_isn_offset_old = V_isn_offset;
2836 new_isn += V_isn_offset;
2842 * When a specific ICMP unreachable message is received and the
2843 * connection state is SYN-SENT, drop the connection. This behavior
2844 * is controlled by the icmp_may_rst sysctl.
2847 tcp_drop_syn_sent(struct inpcb *inp, int errno)
2852 INP_WLOCK_ASSERT(inp);
2854 if ((inp->inp_flags & INP_TIMEWAIT) ||
2855 (inp->inp_flags & INP_DROPPED))
2858 tp = intotcpcb(inp);
2859 if (tp->t_state != TCPS_SYN_SENT)
2862 if (IS_FASTOPEN(tp->t_flags))
2863 tcp_fastopen_disable_path(tp);
2865 tp = tcp_drop(tp, errno);
2873 * When `need fragmentation' ICMP is received, update our idea of the MSS
2874 * based on the new value. Also nudge TCP to send something, since we
2875 * know the packet we just sent was dropped.
2876 * This duplicates some code in the tcp_mss() function in tcp_input.c.
2878 static struct inpcb *
2879 tcp_mtudisc_notify(struct inpcb *inp, int error)
2882 tcp_mtudisc(inp, -1);
2887 tcp_mtudisc(struct inpcb *inp, int mtuoffer)
2892 INP_WLOCK_ASSERT(inp);
2893 if ((inp->inp_flags & INP_TIMEWAIT) ||
2894 (inp->inp_flags & INP_DROPPED))
2897 tp = intotcpcb(inp);
2898 KASSERT(tp != NULL, ("tcp_mtudisc: tp == NULL"));
2900 tcp_mss_update(tp, -1, mtuoffer, NULL, NULL);
2902 so = inp->inp_socket;
2903 SOCKBUF_LOCK(&so->so_snd);
2904 /* If the mss is larger than the socket buffer, decrease the mss. */
2905 if (so->so_snd.sb_hiwat < tp->t_maxseg)
2906 tp->t_maxseg = so->so_snd.sb_hiwat;
2907 SOCKBUF_UNLOCK(&so->so_snd);
2909 TCPSTAT_INC(tcps_mturesent);
2911 tp->snd_nxt = tp->snd_una;
2912 tcp_free_sackholes(tp);
2913 tp->snd_recover = tp->snd_max;
2914 if (tp->t_flags & TF_SACK_PERMIT)
2915 EXIT_FASTRECOVERY(tp->t_flags);
2916 tp->t_fb->tfb_tcp_output(tp);
2921 * Look-up the routing entry to the peer of this inpcb. If no route
2922 * is found and it cannot be allocated, then return 0. This routine
2923 * is called by TCP routines that access the rmx structure and by
2924 * tcp_mss_update to get the peer/interface MTU.
2927 tcp_maxmtu(struct in_conninfo *inc, struct tcp_ifcap *cap)
2929 struct nhop_object *nh;
2931 uint32_t maxmtu = 0;
2933 KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));
2935 if (inc->inc_faddr.s_addr != INADDR_ANY) {
2936 nh = fib4_lookup(inc->inc_fibnum, inc->inc_faddr, 0, NHR_NONE, 0);
2941 maxmtu = nh->nh_mtu;
2943 /* Report additional interface capabilities. */
2945 if (ifp->if_capenable & IFCAP_TSO4 &&
2946 ifp->if_hwassist & CSUM_TSO) {
2947 cap->ifcap |= CSUM_TSO;
2948 cap->tsomax = ifp->if_hw_tsomax;
2949 cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount;
2950 cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
2960 tcp_maxmtu6(struct in_conninfo *inc, struct tcp_ifcap *cap)
2962 struct nhop_object *nh;
2963 struct in6_addr dst6;
2966 uint32_t maxmtu = 0;
2968 KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));
2970 if (inc->inc_flags & INC_IPV6MINMTU)
2973 if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
2974 in6_splitscope(&inc->inc6_faddr, &dst6, &scopeid);
2975 nh = fib6_lookup(inc->inc_fibnum, &dst6, scopeid, NHR_NONE, 0);
2980 maxmtu = nh->nh_mtu;
2982 /* Report additional interface capabilities. */
2984 if (ifp->if_capenable & IFCAP_TSO6 &&
2985 ifp->if_hwassist & CSUM_TSO) {
2986 cap->ifcap |= CSUM_TSO;
2987 cap->tsomax = ifp->if_hw_tsomax;
2988 cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount;
2989 cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
2999 * Calculate effective SMSS per RFC5681 definition for a given TCP
3000 * connection in its current state, taking SACK and other options into account.
3003 tcp_maxseg(const struct tcpcb *tp)
3007 if (tp->t_flags & TF_NOOPT)
3008 return (tp->t_maxseg);
3011 * This is a simplified version of the code in tcp_addoptions(),
3012 * without a proper loop and with most of the padding hardcoded.
3013 * We might get the padding wrong in some edge cases, but that is
3014 * harmless, since the result of tcp_maxseg() is only used in
3015 * cwnd and ssthresh estimations.
3017 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
3018 if (tp->t_flags & TF_RCVD_TSTMP)
3019 optlen = TCPOLEN_TSTAMP_APPA;
3022 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3023 if (tp->t_flags & TF_SIGNATURE)
3024 optlen += PADTCPOLEN(TCPOLEN_SIGNATURE);
3026 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks > 0) {
3027 optlen += TCPOLEN_SACKHDR;
3028 optlen += tp->rcv_numsacks * TCPOLEN_SACK;
3029 optlen = PADTCPOLEN(optlen);
3032 if (tp->t_flags & TF_REQ_TSTMP)
3033 optlen = TCPOLEN_TSTAMP_APPA;
3035 optlen = PADTCPOLEN(TCPOLEN_MAXSEG);
3036 if (tp->t_flags & TF_REQ_SCALE)
3037 optlen += PADTCPOLEN(TCPOLEN_WINDOW);
3038 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3039 if (tp->t_flags & TF_SIGNATURE)
3040 optlen += PADTCPOLEN(TCPOLEN_SIGNATURE);
3042 if (tp->t_flags & TF_SACK_PERMIT)
3043 optlen += PADTCPOLEN(TCPOLEN_SACK_PERMITTED);
3046 optlen = min(optlen, TCP_MAXOLEN);
3047 return (tp->t_maxseg - optlen);
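/*
 * Illustrative sketch, not part of this file: working the option arithmetic
 * above for a common case.  The option lengths mirror the TCPOLEN_* and
 * PADTCPOLEN() definitions in netinet/tcp.h; the 1460-byte MSS assumes IPv4
 * over 1500-byte Ethernet.
 */
#include <stdio.h>

#define EX_TCPOLEN_TSTAMP_APPA	12	/* mirrors TCPOLEN_TSTAMP_APPA */
#define EX_TCPOLEN_SACKHDR	2	/* mirrors TCPOLEN_SACKHDR */
#define EX_TCPOLEN_SACK		8	/* mirrors TCPOLEN_SACK */
#define EX_PAD(len)		(((len) + 3) & ~3)	/* mirrors PADTCPOLEN */

int
main(void)
{
	int t_maxseg = 1460;	/* typical IPv4 MSS on 1500-byte Ethernet */
	int optlen;

	/* Established connection, timestamps negotiated, no SACK blocks. */
	optlen = EX_TCPOLEN_TSTAMP_APPA;
	printf("plain data segment:  %d\n", t_maxseg - optlen);	/* 1448 */

	/* Same connection while echoing one SACK block. */
	optlen = EX_PAD(EX_TCPOLEN_TSTAMP_APPA + EX_TCPOLEN_SACKHDR +
	    1 * EX_TCPOLEN_SACK);
	printf("segment with 1 SACK: %d\n", t_maxseg - optlen);	/* 1436 */
	return (0);
}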
3051 sysctl_drop(SYSCTL_HANDLER_ARGS)
3053 /* addrs[0] is a foreign socket, addrs[1] is a local one. */
3054 struct sockaddr_storage addrs[2];
3058 struct sockaddr_in *fin, *lin;
3059 struct epoch_tracker et;
3061 struct sockaddr_in6 *fin6, *lin6;
3072 if (req->oldptr != NULL || req->oldlen != 0)
3074 if (req->newptr == NULL)
3076 if (req->newlen < sizeof(addrs))
3078 error = SYSCTL_IN(req, &addrs, sizeof(addrs));
3082 switch (addrs[0].ss_family) {
3085 fin6 = (struct sockaddr_in6 *)&addrs[0];
3086 lin6 = (struct sockaddr_in6 *)&addrs[1];
3087 if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
3088 lin6->sin6_len != sizeof(struct sockaddr_in6))
3090 if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
3091 if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
3093 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
3094 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
3095 fin = (struct sockaddr_in *)&addrs[0];
3096 lin = (struct sockaddr_in *)&addrs[1];
3099 error = sa6_embedscope(fin6, V_ip6_use_defzone);
3102 error = sa6_embedscope(lin6, V_ip6_use_defzone);
3109 fin = (struct sockaddr_in *)&addrs[0];
3110 lin = (struct sockaddr_in *)&addrs[1];
3111 if (fin->sin_len != sizeof(struct sockaddr_in) ||
3112 lin->sin_len != sizeof(struct sockaddr_in))
3119 NET_EPOCH_ENTER(et);
3120 switch (addrs[0].ss_family) {
3123 inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr,
3124 fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port,
3125 INPLOOKUP_WLOCKPCB, NULL);
3130 inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port,
3131 lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL);
3136 if (inp->inp_flags & INP_TIMEWAIT) {
3138 * XXXRW: There currently exists a state where an
3139 * inpcb is present, but its timewait state has been
3140 * discarded. For now, don't allow dropping of this
3148 } else if (!(inp->inp_flags & INP_DROPPED) &&
3149 !(inp->inp_socket->so_options & SO_ACCEPTCONN)) {
3150 tp = intotcpcb(inp);
3151 tp = tcp_drop(tp, ECONNABORTED);
3162 SYSCTL_PROC(_net_inet_tcp, TCPCTL_DROP, drop,
3163 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
3164 CTLFLAG_NEEDGIANT, NULL, 0, sysctl_drop, "",
3165 "Drop TCP connection");
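/*
 * Illustrative userland sketch, not part of this file: exercising the
 * net.inet.tcp.drop oid above in the same way tcpdrop(8) does.  Per the
 * comment in sysctl_drop(), addrs[0] is the foreign socket and addrs[1]
 * the local one; the endpoints below are placeholders, and the write
 * normally requires root since the oid is not marked CTLFLAG_ANYBODY.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <string.h>

int
main(void)
{
	struct sockaddr_storage addrs[2];
	struct sockaddr_in *fin = (struct sockaddr_in *)&addrs[0];
	struct sockaddr_in *lin = (struct sockaddr_in *)&addrs[1];

	memset(addrs, 0, sizeof(addrs));
	fin->sin_len = lin->sin_len = sizeof(struct sockaddr_in);
	fin->sin_family = lin->sin_family = AF_INET;
	fin->sin_port = htons(54321);			/* foreign port */
	inet_pton(AF_INET, "198.51.100.2", &fin->sin_addr);
	lin->sin_port = htons(22);			/* local port */
	inet_pton(AF_INET, "192.0.2.1", &lin->sin_addr);

	/* Write-only oid: no old data is returned. */
	if (sysctlbyname("net.inet.tcp.drop", NULL, NULL, addrs,
	    sizeof(addrs)) == -1)
		err(1, "net.inet.tcp.drop");
	return (0);
}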
3169 sysctl_switch_tls(SYSCTL_HANDLER_ARGS)
3171 /* addrs[0] is a foreign socket, addrs[1] is a local one. */
3172 struct sockaddr_storage addrs[2];
3174 struct sockaddr_in *fin, *lin;
3175 struct epoch_tracker et;
3177 struct sockaddr_in6 *fin6, *lin6;
3188 if (req->oldptr != NULL || req->oldlen != 0)
3190 if (req->newptr == NULL)
3192 if (req->newlen < sizeof(addrs))
3194 error = SYSCTL_IN(req, &addrs, sizeof(addrs));
3198 switch (addrs[0].ss_family) {
3201 fin6 = (struct sockaddr_in6 *)&addrs[0];
3202 lin6 = (struct sockaddr_in6 *)&addrs[1];
3203 if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
3204 lin6->sin6_len != sizeof(struct sockaddr_in6))
3206 if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
3207 if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
3209 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
3210 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
3211 fin = (struct sockaddr_in *)&addrs[0];
3212 lin = (struct sockaddr_in *)&addrs[1];
3215 error = sa6_embedscope(fin6, V_ip6_use_defzone);
3218 error = sa6_embedscope(lin6, V_ip6_use_defzone);
3225 fin = (struct sockaddr_in *)&addrs[0];
3226 lin = (struct sockaddr_in *)&addrs[1];
3227 if (fin->sin_len != sizeof(struct sockaddr_in) ||
3228 lin->sin_len != sizeof(struct sockaddr_in))
3235 NET_EPOCH_ENTER(et);
3236 switch (addrs[0].ss_family) {
3239 inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr,
3240 fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port,
3241 INPLOOKUP_WLOCKPCB, NULL);
3246 inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port,
3247 lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL);
3253 if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) != 0 ||
3254 inp->inp_socket == NULL) {
3260 so = inp->inp_socket;
3262 error = ktls_set_tx_mode(so,
3263 arg2 == 0 ? TCP_TLS_MODE_SW : TCP_TLS_MODE_IFNET);
3273 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, switch_to_sw_tls,
3274 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
3275 CTLFLAG_NEEDGIANT, NULL, 0, sysctl_switch_tls, "",
3276 "Switch TCP connection to SW TLS");
3277 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, switch_to_ifnet_tls,
3278 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
3279 CTLFLAG_NEEDGIANT, NULL, 1, sysctl_switch_tls, "",
3280 "Switch TCP connection to ifnet TLS");
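/*
 * The two switch_to_*_tls oids above consume the same two-sockaddr payload
 * as net.inet.tcp.drop, so the userland sketch shown after the drop handler
 * applies here as well, substituting the oid name and the endpoints of the
 * connection whose TLS send path should be switched.
 */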
3284 * Generate a standardized TCP log line for use throughout the
3285 * tcp subsystem. Memory allocation is done with M_NOWAIT to
3286 * allow use in the interrupt context.
3288 * NB: The caller MUST free(s, M_TCPLOG) the returned string.
3289 * NB: The function may return NULL if memory allocation failed.
3291 * Due to header inclusion and ordering limitations the struct ip
3292 * and ip6_hdr pointers have to be passed as void pointers.
3295 tcp_log_vain(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
3299 /* Is logging enabled? */
3300 if (V_tcp_log_in_vain == 0)
3303 return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
3307 tcp_log_addrs(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
3311 /* Is logging enabled? */
3312 if (tcp_log_debug == 0)
3315 return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
3319 tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
3326 const struct ip6_hdr *ip6;
3328 ip6 = (const struct ip6_hdr *)ip6hdr;
3330 ip = (struct ip *)ip4hdr;
3333 * The log line looks like this:
3334 * "TCP: [1.2.3.4]:50332 to [1.2.3.4]:80 tcpflags 0x2<SYN>"
3336 size = sizeof("TCP: []:12345 to []:12345 tcpflags 0x2<>") +
3337 sizeof(PRINT_TH_FLAGS) + 1 +
3339 2 * INET6_ADDRSTRLEN;
3341 2 * INET_ADDRSTRLEN;
3344 s = malloc(size, M_TCPLOG, M_ZERO|M_NOWAIT);
3348 strcat(s, "TCP: [");
3351 if (inc && ((inc->inc_flags & INC_ISIPV6) == 0)) {
3352 inet_ntoa_r(inc->inc_faddr, sp);
3354 sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
3356 inet_ntoa_r(inc->inc_laddr, sp);
3358 sprintf(sp, "]:%i", ntohs(inc->inc_lport));
3361 ip6_sprintf(sp, &inc->inc6_faddr);
3363 sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
3365 ip6_sprintf(sp, &inc->inc6_laddr);
3367 sprintf(sp, "]:%i", ntohs(inc->inc_lport));
3368 } else if (ip6 && th) {
3369 ip6_sprintf(sp, &ip6->ip6_src);
3371 sprintf(sp, "]:%i to [", ntohs(th->th_sport));
3373 ip6_sprintf(sp, &ip6->ip6_dst);
3375 sprintf(sp, "]:%i", ntohs(th->th_dport));
3378 } else if (ip && th) {
3379 inet_ntoa_r(ip->ip_src, sp);
3381 sprintf(sp, "]:%i to [", ntohs(th->th_sport));
3383 inet_ntoa_r(ip->ip_dst, sp);
3385 sprintf(sp, "]:%i", ntohs(th->th_dport));
3393 sprintf(sp, " tcpflags 0x%b", th->th_flags, PRINT_TH_FLAGS);
3394 if (*(s + size - 1) != '\0')
3395 panic("%s: string too long", __func__);
3400 * A subroutine which makes it easy to track TCP state changes with DTrace.
3401 * This function shouldn't be called for t_state initializations that don't
3402 * correspond to actual TCP state transitions.
3405 tcp_state_change(struct tcpcb *tp, int newstate)
3407 #if defined(KDTRACE_HOOKS)
3408 int pstate = tp->t_state;
3411 TCPSTATES_DEC(tp->t_state);
3412 TCPSTATES_INC(newstate);
3413 tp->t_state = newstate;
3414 TCP_PROBE6(state__change, NULL, tp, NULL, tp, NULL, pstate);
3418 * Create an external-format (``xtcpcb'') structure using the information in
3419 * the kernel-format tcpcb structure pointed to by tp. This is done to
3420 * reduce the spew of irrelevant information over this interface, to isolate
3421 * user code from changes in the kernel structure, and potentially to provide
3422 * information-hiding if we decide that some of this information should be
3423 * hidden from users.
3426 tcp_inptoxtp(const struct inpcb *inp, struct xtcpcb *xt)
3428 struct tcpcb *tp = intotcpcb(inp);
3431 bzero(xt, sizeof(*xt));
3432 if (inp->inp_flags & INP_TIMEWAIT) {
3433 xt->t_state = TCPS_TIME_WAIT;
3435 xt->t_state = tp->t_state;
3436 xt->t_logstate = tp->t_logstate;
3437 xt->t_flags = tp->t_flags;
3438 xt->t_sndzerowin = tp->t_sndzerowin;
3439 xt->t_sndrexmitpack = tp->t_sndrexmitpack;
3440 xt->t_rcvoopack = tp->t_rcvoopack;
3441 xt->t_rcv_wnd = tp->rcv_wnd;
3442 xt->t_snd_wnd = tp->snd_wnd;
3443 xt->t_snd_cwnd = tp->snd_cwnd;
3444 xt->t_snd_ssthresh = tp->snd_ssthresh;
3445 xt->t_maxseg = tp->t_maxseg;
3446 xt->xt_ecn = ((tp->t_flags2 & TF2_ECN_PERMIT) ? 1 : 0) +
3447 ((tp->t_flags2 & TF2_ACE_PERMIT) ? 2 : 0);
3449 now = getsbinuptime();
3450 #define COPYTIMER(ttt) do { \
3451 if (callout_active(&tp->t_timers->ttt)) \
3452 xt->ttt = (tp->t_timers->ttt.c_time - now) / \
3457 COPYTIMER(tt_delack);
3458 COPYTIMER(tt_rexmt);
3459 COPYTIMER(tt_persist);
3463 xt->t_rcvtime = 1000 * (ticks - tp->t_rcvtime) / hz;
3465 bcopy(tp->t_fb->tfb_tcp_block_name, xt->xt_stack,
3466 TCP_FUNCTION_NAME_LEN_MAX);
3467 bcopy(CC_ALGO(tp)->name, xt->xt_cc,
3470 (void)tcp_log_get_id(tp, xt->xt_logid);
3474 xt->xt_len = sizeof(struct xtcpcb);
3475 in_pcbtoxinpcb(inp, &xt->xt_inp);
3476 if (inp->inp_socket == NULL)
3477 xt->xt_inp.xi_socket.xso_protocol = IPPROTO_TCP;
3481 tcp_log_end_status(struct tcpcb *tp, uint8_t status)
3486 (status > TCP_EI_STATUS_MAX_VALUE) ||
3491 if (status > (sizeof(uint32_t) * 8)) {
3492 /* Should this be a KASSERT? */
3495 bit = 1U << (status - 1);
3496 if (bit & tp->t_end_info_status) {
3497 /* already logged */
3500 for (i = 0; i < TCP_END_BYTE_INFO; i++) {
3501 if (tp->t_end_info_bytes[i] == TCP_EI_EMPTY_SLOT) {
3502 tp->t_end_info_bytes[i] = status;
3503 tp->t_end_info_status |= bit;