2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
5 * The Regents of the University of California. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
38 #include "opt_inet6.h"
39 #include "opt_ipsec.h"
40 #include "opt_kern_tls.h"
41 #include "opt_tcpdebug.h"
43 #include <sys/param.h>
44 #include <sys/systm.h>
46 #include <sys/callout.h>
47 #include <sys/eventhandler.h>
49 #include <sys/hhook.h>
51 #include <sys/kernel.h>
53 #include <sys/khelp.h>
58 #include <sys/qmath.h>
59 #include <sys/stats.h>
60 #include <sys/sysctl.h>
62 #include <sys/malloc.h>
63 #include <sys/refcount.h>
68 #include <sys/socket.h>
69 #include <sys/socketvar.h>
70 #include <sys/protosw.h>
71 #include <sys/random.h>
75 #include <net/route.h>
76 #include <net/route/nhop.h>
78 #include <net/if_var.h>
81 #include <netinet/in.h>
82 #include <netinet/in_fib.h>
83 #include <netinet/in_kdtrace.h>
84 #include <netinet/in_pcb.h>
85 #include <netinet/in_systm.h>
86 #include <netinet/in_var.h>
87 #include <netinet/ip.h>
88 #include <netinet/ip_icmp.h>
89 #include <netinet/ip_var.h>
91 #include <netinet/icmp6.h>
92 #include <netinet/ip6.h>
93 #include <netinet6/in6_fib.h>
94 #include <netinet6/in6_pcb.h>
95 #include <netinet6/ip6_var.h>
96 #include <netinet6/scope6_var.h>
97 #include <netinet6/nd6.h>
100 #include <netinet/tcp.h>
104 #include <netinet/tcp_fsm.h>
105 #include <netinet/tcp_seq.h>
106 #include <netinet/tcp_timer.h>
107 #include <netinet/tcp_var.h>
108 #include <netinet/tcp_log_buf.h>
109 #include <netinet/tcp_syncache.h>
110 #include <netinet/tcp_hpts.h>
111 #include <netinet/cc/cc.h>
113 #include <netinet6/tcp6_var.h>
115 #include <netinet/tcpip.h>
116 #include <netinet/tcp_fastopen.h>
118 #include <netinet/tcp_pcap.h>
121 #include <netinet/tcp_debug.h>
124 #include <netinet6/ip6protosw.h>
127 #include <netinet/tcp_offload.h>
129 #include <netinet/udp.h>
130 #include <netinet/udp_var.h>
132 #include <netipsec/ipsec_support.h>
134 #include <machine/in_cksum.h>
135 #include <crypto/siphash/siphash.h>
137 #include <security/mac/mac_framework.h>
139 VNET_DEFINE(int, tcp_mssdflt) = TCP_MSS;
141 VNET_DEFINE(int, tcp_v6mssdflt) = TCP6_MSS;
144 #ifdef NETFLIX_EXP_DETECTION
145 /* Sack attack detection thresholds and such */
146 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, sack_attack,
147 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
148 "Sack Attack detection thresholds");
149 int32_t tcp_force_detection = 0;
150 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, force_detection,
152 &tcp_force_detection, 0,
153 "Do we force detection even if the INP has it off?");
154 int32_t tcp_sack_to_ack_thresh = 700; /* 70 % */
155 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sack_to_ack_thresh,
157 &tcp_sack_to_ack_thresh, 700,
158 "Percentage of sacks to acks we must see above (10.1 percent is 101)?");
159 int32_t tcp_sack_to_move_thresh = 600; /* 60 % */
160 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, move_thresh,
162 &tcp_sack_to_move_thresh, 600,
163 "Percentage of sack moves we must see above (10.1 percent is 101)");
164 int32_t tcp_restoral_thresh = 650; /* 65 % (sack:2:ack -5%) */
165 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, restore_thresh,
167 &tcp_restoral_thresh, 550,
168 "Percentage of sack to ack percentage we must see below to restore(10.1 percent is 101)");
169 int32_t tcp_sad_decay_val = 800;
170 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, decay_per,
172 &tcp_sad_decay_val, 800,
173 "The decay percentage (10.1 percent equals 101 )");
174 int32_t tcp_map_minimum = 500;
175 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, nummaps,
177 &tcp_map_minimum, 500,
178 "Number of Map enteries before we start detection");
179 int32_t tcp_attack_on_turns_on_logging = 0;
180 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, attacks_logged,
182 &tcp_attack_on_turns_on_logging, 0,
183 "When we have a positive hit on attack, do we turn on logging?");
184 int32_t tcp_sad_pacing_interval = 2000;
185 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sad_pacing_int,
187 &tcp_sad_pacing_interval, 2000,
188 "What is the minimum pacing interval for a classified attacker?");
190 int32_t tcp_sad_low_pps = 100;
191 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sad_low_pps,
193 &tcp_sad_low_pps, 100,
194 "What is the input pps that below which we do not decay?");
196 uint32_t tcp_ack_war_time_window = 1000;
197 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ack_war_timewindow,
199 &tcp_ack_war_time_window, 1000,
200 "If the tcp_stack does ack-war prevention how many milliseconds are in its time window?");
201 uint32_t tcp_ack_war_cnt = 5;
202 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ack_war_cnt,
205 "If the tcp_stack does ack-war prevention how many acks can be sent in its time window?");
207 struct rwlock tcp_function_lock;
210 sysctl_net_inet_tcp_mss_check(SYSCTL_HANDLER_ARGS)
215 error = sysctl_handle_int(oidp, &new, 0, req);
216 if (error == 0 && req->newptr) {
217 if (new < TCP_MINMSS)
225 SYSCTL_PROC(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt,
226 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
227 &VNET_NAME(tcp_mssdflt), 0, &sysctl_net_inet_tcp_mss_check, "I",
228 "Default TCP Maximum Segment Size");
232 sysctl_net_inet_tcp_mss_v6_check(SYSCTL_HANDLER_ARGS)
236 new = V_tcp_v6mssdflt;
237 error = sysctl_handle_int(oidp, &new, 0, req);
238 if (error == 0 && req->newptr) {
239 if (new < TCP_MINMSS)
242 V_tcp_v6mssdflt = new;
247 SYSCTL_PROC(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
248 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
249 &VNET_NAME(tcp_v6mssdflt), 0, &sysctl_net_inet_tcp_mss_v6_check, "I",
250 "Default TCP Maximum Segment Size for IPv6");
254 * Minimum MSS we accept and use. This prevents DoS attacks where
255 * we are forced to a ridiculously low MSS like 20 and send hundreds
256 * of packets instead of one. The effect scales with the available
257 * bandwidth and quickly saturates the CPU and network interface
258 * with packet generation and sending. Set to zero to disable MINMSS
259 * checking. This setting prevents us from sending too small packets.
261 VNET_DEFINE(int, tcp_minmss) = TCP_MINMSS;
262 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_VNET | CTLFLAG_RW,
263 &VNET_NAME(tcp_minmss), 0,
264 "Minimum TCP Maximum Segment Size");
266 VNET_DEFINE(int, tcp_do_rfc1323) = 1;
267 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_VNET | CTLFLAG_RW,
268 &VNET_NAME(tcp_do_rfc1323), 0,
269 "Enable rfc1323 (high performance TCP) extensions");
272 * As of June 2021, several TCP stacks violate RFC 7323 from September 2014.
273 * Some stacks negotiate TS, but never send them after connection setup. Some
274 * stacks negotiate TS, but don't send them when sending keep-alive segments.
275 * These include modern widely deployed TCP stacks.
276 * Therefore tolerating violations for now...
278 VNET_DEFINE(int, tcp_tolerate_missing_ts) = 1;
279 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tolerate_missing_ts, CTLFLAG_VNET | CTLFLAG_RW,
280 &VNET_NAME(tcp_tolerate_missing_ts), 0,
281 "Tolerate missing TCP timestamps");
283 VNET_DEFINE(int, tcp_ts_offset_per_conn) = 1;
284 SYSCTL_INT(_net_inet_tcp, OID_AUTO, ts_offset_per_conn, CTLFLAG_VNET | CTLFLAG_RW,
285 &VNET_NAME(tcp_ts_offset_per_conn), 0,
286 "Initialize TCP timestamps per connection instead of per host pair");
288 /* How many connections are pacing */
289 static volatile uint32_t number_of_tcp_connections_pacing = 0;
290 static uint32_t shadow_num_connections = 0;
292 static int tcp_pacing_limit = 10000;
293 SYSCTL_INT(_net_inet_tcp, OID_AUTO, pacing_limit, CTLFLAG_RW,
294 &tcp_pacing_limit, 1000,
295 "If the TCP stack does pacing, is there a limit (-1 = no, 0 = no pacing N = number of connections)");
297 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pacing_count, CTLFLAG_RD,
298 &shadow_num_connections, 0, "Number of TCP connections being paced");
300 static int tcp_log_debug = 0;
301 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_debug, CTLFLAG_RW,
302 &tcp_log_debug, 0, "Log errors caused by incoming TCP segments");
304 static int tcp_tcbhashsize;
305 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
306 &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
308 static int do_tcpdrain = 1;
309 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
310 "Enable tcp_drain routine for extra help when low on mbufs");
312 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_VNET | CTLFLAG_RD,
313 &VNET_NAME(tcbinfo.ipi_count), 0, "Number of active PCBs");
315 VNET_DEFINE_STATIC(int, icmp_may_rst) = 1;
316 #define V_icmp_may_rst VNET(icmp_may_rst)
317 SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_VNET | CTLFLAG_RW,
318 &VNET_NAME(icmp_may_rst), 0,
319 "Certain ICMP unreachable messages may abort connections in SYN_SENT");
321 VNET_DEFINE_STATIC(int, tcp_isn_reseed_interval) = 0;
322 #define V_tcp_isn_reseed_interval VNET(tcp_isn_reseed_interval)
323 SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_VNET | CTLFLAG_RW,
324 &VNET_NAME(tcp_isn_reseed_interval), 0,
325 "Seconds between reseeding of ISN secret");
327 static int tcp_soreceive_stream;
328 SYSCTL_INT(_net_inet_tcp, OID_AUTO, soreceive_stream, CTLFLAG_RDTUN,
329 &tcp_soreceive_stream, 0, "Using soreceive_stream for TCP sockets");
331 VNET_DEFINE(uma_zone_t, sack_hole_zone);
332 #define V_sack_hole_zone VNET(sack_hole_zone)
333 VNET_DEFINE(uint32_t, tcp_map_entries_limit) = 0; /* unlimited */
335 sysctl_net_inet_tcp_map_limit_check(SYSCTL_HANDLER_ARGS)
340 new = V_tcp_map_entries_limit;
341 error = sysctl_handle_int(oidp, &new, 0, req);
342 if (error == 0 && req->newptr) {
343 /* only allow "0" and value > minimum */
344 if (new > 0 && new < TCP_MIN_MAP_ENTRIES_LIMIT)
347 V_tcp_map_entries_limit = new;
351 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, map_limit,
352 CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
353 &VNET_NAME(tcp_map_entries_limit), 0,
354 &sysctl_net_inet_tcp_map_limit_check, "IU",
355 "Total sendmap entries limit");
357 VNET_DEFINE(uint32_t, tcp_map_split_limit) = 0; /* unlimited */
358 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, split_limit, CTLFLAG_VNET | CTLFLAG_RW,
359 &VNET_NAME(tcp_map_split_limit), 0,
360 "Total sendmap split entries limit");
363 VNET_DEFINE(struct hhook_head *, tcp_hhh[HHOOK_TCP_LAST+1]);
366 #define TS_OFFSET_SECRET_LENGTH SIPHASH_KEY_LENGTH
367 VNET_DEFINE_STATIC(u_char, ts_offset_secret[TS_OFFSET_SECRET_LENGTH]);
368 #define V_ts_offset_secret VNET(ts_offset_secret)
370 static int tcp_default_fb_init(struct tcpcb *tp);
371 static void tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged);
372 static int tcp_default_handoff_ok(struct tcpcb *tp);
373 static struct inpcb *tcp_notify(struct inpcb *, int);
374 static struct inpcb *tcp_mtudisc_notify(struct inpcb *, int);
375 static struct inpcb *tcp_mtudisc(struct inpcb *, int);
376 static char * tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th,
377 const void *ip4hdr, const void *ip6hdr);
379 static struct tcp_function_block tcp_def_funcblk = {
380 .tfb_tcp_block_name = "freebsd",
381 .tfb_tcp_output = tcp_default_output,
382 .tfb_tcp_do_segment = tcp_do_segment,
383 .tfb_tcp_ctloutput = tcp_default_ctloutput,
384 .tfb_tcp_handoff_ok = tcp_default_handoff_ok,
385 .tfb_tcp_fb_init = tcp_default_fb_init,
386 .tfb_tcp_fb_fini = tcp_default_fb_fini,
389 static int tcp_fb_cnt = 0;
390 struct tcp_funchead t_functions;
391 static struct tcp_function_block *tcp_func_set_ptr = &tcp_def_funcblk;
394 tcp_record_dsack(struct tcpcb *tp, tcp_seq start, tcp_seq end, int tlp)
396 TCPSTAT_INC(tcps_dsack_count);
399 if (SEQ_GT(end, start)) {
400 tp->t_dsack_bytes += (end - start);
401 TCPSTAT_ADD(tcps_dsack_bytes, (end - start));
403 tp->t_dsack_tlp_bytes += (start - end);
404 TCPSTAT_ADD(tcps_dsack_bytes, (start - end));
407 if (SEQ_GT(end, start)) {
408 tp->t_dsack_bytes += (end - start);
409 TCPSTAT_ADD(tcps_dsack_tlp_bytes, (end - start));
411 tp->t_dsack_tlp_bytes += (start - end);
412 TCPSTAT_ADD(tcps_dsack_tlp_bytes, (start - end));
417 static struct tcp_function_block *
418 find_tcp_functions_locked(struct tcp_function_set *fs)
420 struct tcp_function *f;
421 struct tcp_function_block *blk=NULL;
423 TAILQ_FOREACH(f, &t_functions, tf_next) {
424 if (strcmp(f->tf_name, fs->function_set_name) == 0) {
432 static struct tcp_function_block *
433 find_tcp_fb_locked(struct tcp_function_block *blk, struct tcp_function **s)
435 struct tcp_function_block *rblk=NULL;
436 struct tcp_function *f;
438 TAILQ_FOREACH(f, &t_functions, tf_next) {
439 if (f->tf_fb == blk) {
450 struct tcp_function_block *
451 find_and_ref_tcp_functions(struct tcp_function_set *fs)
453 struct tcp_function_block *blk;
455 rw_rlock(&tcp_function_lock);
456 blk = find_tcp_functions_locked(fs);
458 refcount_acquire(&blk->tfb_refcnt);
459 rw_runlock(&tcp_function_lock);
463 struct tcp_function_block *
464 find_and_ref_tcp_fb(struct tcp_function_block *blk)
466 struct tcp_function_block *rblk;
468 rw_rlock(&tcp_function_lock);
469 rblk = find_tcp_fb_locked(blk, NULL);
471 refcount_acquire(&rblk->tfb_refcnt);
472 rw_runlock(&tcp_function_lock);
476 /* Find a matching alias for the given tcp_function_block. */
478 find_tcp_function_alias(struct tcp_function_block *blk,
479 struct tcp_function_set *fs)
481 struct tcp_function *f;
485 rw_rlock(&tcp_function_lock);
486 TAILQ_FOREACH(f, &t_functions, tf_next) {
487 if ((f->tf_fb == blk) &&
488 (strncmp(f->tf_name, blk->tfb_tcp_block_name,
489 TCP_FUNCTION_NAME_LEN_MAX) != 0)) {
490 /* Matching function block with different name. */
491 strncpy(fs->function_set_name, f->tf_name,
492 TCP_FUNCTION_NAME_LEN_MAX);
497 /* Null terminate the string appropriately. */
499 fs->function_set_name[TCP_FUNCTION_NAME_LEN_MAX - 1] = '\0';
501 fs->function_set_name[0] = '\0';
503 rw_runlock(&tcp_function_lock);
507 static struct tcp_function_block *
508 find_and_ref_tcp_default_fb(void)
510 struct tcp_function_block *rblk;
512 rw_rlock(&tcp_function_lock);
513 rblk = tcp_func_set_ptr;
514 refcount_acquire(&rblk->tfb_refcnt);
515 rw_runlock(&tcp_function_lock);
520 tcp_switch_back_to_default(struct tcpcb *tp)
522 struct tcp_function_block *tfb;
524 KASSERT(tp->t_fb != &tcp_def_funcblk,
525 ("%s: called by the built-in default stack", __func__));
528 * Release the old stack. This function will either find a new one
531 if (tp->t_fb->tfb_tcp_fb_fini != NULL)
532 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 0);
533 refcount_release(&tp->t_fb->tfb_refcnt);
536 * Now, we'll find a new function block to use.
537 * Start by trying the current user-selected
538 * default, unless this stack is the user-selected
541 tfb = find_and_ref_tcp_default_fb();
542 if (tfb == tp->t_fb) {
543 refcount_release(&tfb->tfb_refcnt);
546 /* Does the stack accept this connection? */
547 if (tfb != NULL && tfb->tfb_tcp_handoff_ok != NULL &&
548 (*tfb->tfb_tcp_handoff_ok)(tp)) {
549 refcount_release(&tfb->tfb_refcnt);
552 /* Try to use that stack. */
554 /* Initialize the new stack. If it succeeds, we are done. */
556 if (tp->t_fb->tfb_tcp_fb_init == NULL ||
557 (*tp->t_fb->tfb_tcp_fb_init)(tp) == 0)
561 * Initialization failed. Release the reference count on
564 refcount_release(&tfb->tfb_refcnt);
568 * If that wasn't feasible, use the built-in default
569 * stack which is not allowed to reject anyone.
571 tfb = find_and_ref_tcp_fb(&tcp_def_funcblk);
573 /* there should always be a default */
574 panic("Can't refer to tcp_def_funcblk");
576 if (tfb->tfb_tcp_handoff_ok != NULL) {
577 if ((*tfb->tfb_tcp_handoff_ok) (tp)) {
578 /* The default stack cannot say no */
579 panic("Default stack rejects a new session?");
583 if (tp->t_fb->tfb_tcp_fb_init != NULL &&
584 (*tp->t_fb->tfb_tcp_fb_init)(tp)) {
585 /* The default stack cannot fail */
586 panic("Default stack initialization failed");
591 tcp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
592 const struct sockaddr *sa, void *ctx)
603 TCPSTAT_INC(tcps_tunneled_pkts);
604 if ((m->m_flags & M_PKTHDR) == 0) {
605 /* Can't handle one that is not a pkt hdr */
606 TCPSTAT_INC(tcps_tunneled_errs);
609 thlen = sizeof(struct tcphdr);
610 if (m->m_len < off + sizeof(struct udphdr) + thlen &&
611 (m = m_pullup(m, off + sizeof(struct udphdr) + thlen)) == NULL) {
612 TCPSTAT_INC(tcps_tunneled_errs);
615 iph = mtod(m, struct ip *);
616 uh = (struct udphdr *)((caddr_t)iph + off);
617 th = (struct tcphdr *)(uh + 1);
618 thlen = th->th_off << 2;
619 if (m->m_len < off + sizeof(struct udphdr) + thlen) {
620 m = m_pullup(m, off + sizeof(struct udphdr) + thlen);
622 TCPSTAT_INC(tcps_tunneled_errs);
625 iph = mtod(m, struct ip *);
626 uh = (struct udphdr *)((caddr_t)iph + off);
627 th = (struct tcphdr *)(uh + 1);
630 m->m_pkthdr.tcp_tun_port = port = uh->uh_sport;
631 bcopy(th, uh, m->m_len - off);
632 m->m_len -= sizeof(struct udphdr);
633 m->m_pkthdr.len -= sizeof(struct udphdr);
635 * We use the same algorithm for
636 * both UDP and TCP for c-sum. So
637 * the code in tcp_input will skip
638 * the checksum. So we do nothing
639 * with the flag (m->m_pkthdr.csum_flags).
644 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
645 tcp_input_with_port(&m, &off, IPPROTO_TCP, port);
649 case IPV6_VERSION >> 4:
650 ip6 = mtod(m, struct ip6_hdr *);
651 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
652 tcp6_input_with_port(&m, &off, IPPROTO_TCP, port);
667 sysctl_net_inet_default_tcp_functions(SYSCTL_HANDLER_ARGS)
670 struct tcp_function_set fs;
671 struct tcp_function_block *blk;
673 memset(&fs, 0, sizeof(fs));
674 rw_rlock(&tcp_function_lock);
675 blk = find_tcp_fb_locked(tcp_func_set_ptr, NULL);
678 strcpy(fs.function_set_name, blk->tfb_tcp_block_name);
679 fs.pcbcnt = blk->tfb_refcnt;
681 rw_runlock(&tcp_function_lock);
682 error = sysctl_handle_string(oidp, fs.function_set_name,
683 sizeof(fs.function_set_name), req);
685 /* Check for error or no change */
686 if (error != 0 || req->newptr == NULL)
689 rw_wlock(&tcp_function_lock);
690 blk = find_tcp_functions_locked(&fs);
692 (blk->tfb_flags & TCP_FUNC_BEING_REMOVED)) {
696 tcp_func_set_ptr = blk;
698 rw_wunlock(&tcp_function_lock);
702 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_default,
703 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
704 NULL, 0, sysctl_net_inet_default_tcp_functions, "A",
705 "Set/get the default TCP functions");
708 sysctl_net_inet_list_available(SYSCTL_HANDLER_ARGS)
710 int error, cnt, linesz;
711 struct tcp_function *f;
717 rw_rlock(&tcp_function_lock);
718 TAILQ_FOREACH(f, &t_functions, tf_next) {
721 rw_runlock(&tcp_function_lock);
723 bufsz = (cnt+2) * ((TCP_FUNCTION_NAME_LEN_MAX * 2) + 13) + 1;
724 buffer = malloc(bufsz, M_TEMP, M_WAITOK);
729 linesz = snprintf(cp, bufsz, "\n%-32s%c %-32s %s\n", "Stack", 'D',
730 "Alias", "PCB count");
735 rw_rlock(&tcp_function_lock);
736 TAILQ_FOREACH(f, &t_functions, tf_next) {
737 alias = (f->tf_name != f->tf_fb->tfb_tcp_block_name);
738 linesz = snprintf(cp, bufsz, "%-32s%c %-32s %u\n",
739 f->tf_fb->tfb_tcp_block_name,
740 (f->tf_fb == tcp_func_set_ptr) ? '*' : ' ',
741 alias ? f->tf_name : "-",
742 f->tf_fb->tfb_refcnt);
743 if (linesz >= bufsz) {
751 rw_runlock(&tcp_function_lock);
753 error = sysctl_handle_string(oidp, buffer, outsz + 1, req);
754 free(buffer, M_TEMP);
758 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_available,
759 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
760 NULL, 0, sysctl_net_inet_list_available, "A",
761 "list available TCP Function sets");
763 VNET_DEFINE(int, tcp_udp_tunneling_port) = TCP_TUNNELING_PORT_DEFAULT;
766 VNET_DEFINE(struct socket *, udp4_tun_socket) = NULL;
767 #define V_udp4_tun_socket VNET(udp4_tun_socket)
770 VNET_DEFINE(struct socket *, udp6_tun_socket) = NULL;
771 #define V_udp6_tun_socket VNET(udp6_tun_socket)
775 tcp_over_udp_stop(void)
778 * This function assumes sysctl caller holds inp_rinfo_lock()
782 if (V_udp4_tun_socket != NULL) {
783 soclose(V_udp4_tun_socket);
784 V_udp4_tun_socket = NULL;
788 if (V_udp6_tun_socket != NULL) {
789 soclose(V_udp6_tun_socket);
790 V_udp6_tun_socket = NULL;
796 tcp_over_udp_start(void)
801 struct sockaddr_in sin;
804 struct sockaddr_in6 sin6;
807 * This function assumes sysctl caller holds inp_info_rlock()
810 port = V_tcp_udp_tunneling_port;
811 if (ntohs(port) == 0) {
812 /* Must have a port set */
816 if (V_udp4_tun_socket != NULL) {
817 /* Already running -- must stop first */
822 if (V_udp6_tun_socket != NULL) {
823 /* Already running -- must stop first */
828 if ((ret = socreate(PF_INET, &V_udp4_tun_socket,
829 SOCK_DGRAM, IPPROTO_UDP,
830 curthread->td_ucred, curthread))) {
834 /* Call the special UDP hook. */
835 if ((ret = udp_set_kernel_tunneling(V_udp4_tun_socket,
836 tcp_recv_udp_tunneled_packet,
842 /* Ok, we have a socket, bind it to the port. */
843 memset(&sin, 0, sizeof(struct sockaddr_in));
844 sin.sin_len = sizeof(struct sockaddr_in);
845 sin.sin_family = AF_INET;
846 sin.sin_port = htons(port);
847 if ((ret = sobind(V_udp4_tun_socket,
848 (struct sockaddr *)&sin, curthread))) {
854 if ((ret = socreate(PF_INET6, &V_udp6_tun_socket,
855 SOCK_DGRAM, IPPROTO_UDP,
856 curthread->td_ucred, curthread))) {
860 /* Call the special UDP hook. */
861 if ((ret = udp_set_kernel_tunneling(V_udp6_tun_socket,
862 tcp_recv_udp_tunneled_packet,
863 tcp6_ctlinput_viaudp,
868 /* Ok, we have a socket, bind it to the port. */
869 memset(&sin6, 0, sizeof(struct sockaddr_in6));
870 sin6.sin6_len = sizeof(struct sockaddr_in6);
871 sin6.sin6_family = AF_INET6;
872 sin6.sin6_port = htons(port);
873 if ((ret = sobind(V_udp6_tun_socket,
874 (struct sockaddr *)&sin6, curthread))) {
883 sysctl_net_inet_tcp_udp_tunneling_port_check(SYSCTL_HANDLER_ARGS)
888 old = V_tcp_udp_tunneling_port;
890 error = sysctl_handle_int(oidp, &new, 0, req);
892 (req->newptr != NULL)) {
893 if ((new < TCP_TUNNELING_PORT_MIN) ||
894 (new > TCP_TUNNELING_PORT_MAX)) {
897 V_tcp_udp_tunneling_port = new;
902 error = tcp_over_udp_start();
909 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, udp_tunneling_port,
910 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
911 &VNET_NAME(tcp_udp_tunneling_port),
912 0, &sysctl_net_inet_tcp_udp_tunneling_port_check, "IU",
913 "Tunneling port for tcp over udp");
915 VNET_DEFINE(int, tcp_udp_tunneling_overhead) = TCP_TUNNELING_OVERHEAD_DEFAULT;
918 sysctl_net_inet_tcp_udp_tunneling_overhead_check(SYSCTL_HANDLER_ARGS)
922 new = V_tcp_udp_tunneling_overhead;
923 error = sysctl_handle_int(oidp, &new, 0, req);
924 if (error == 0 && req->newptr) {
925 if ((new < TCP_TUNNELING_OVERHEAD_MIN) ||
926 (new > TCP_TUNNELING_OVERHEAD_MAX))
929 V_tcp_udp_tunneling_overhead = new;
934 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, udp_tunneling_overhead,
935 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
936 &VNET_NAME(tcp_udp_tunneling_overhead),
937 0, &sysctl_net_inet_tcp_udp_tunneling_overhead_check, "IU",
938 "MSS reduction when using tcp over udp");
941 * Exports one (struct tcp_function_info) for each alias/name.
944 sysctl_net_inet_list_func_info(SYSCTL_HANDLER_ARGS)
947 struct tcp_function *f;
948 struct tcp_function_info tfi;
951 * We don't allow writes.
953 if (req->newptr != NULL)
957 * Wire the old buffer so we can directly copy the functions to
958 * user space without dropping the lock.
960 if (req->oldptr != NULL) {
961 error = sysctl_wire_old_buffer(req, 0);
967 * Walk the list and copy out matching entries. If INVARIANTS
968 * is compiled in, also walk the list to verify the length of
969 * the list matches what we have recorded.
971 rw_rlock(&tcp_function_lock);
975 if (req->oldptr == NULL) {
980 TAILQ_FOREACH(f, &t_functions, tf_next) {
984 if (req->oldptr != NULL) {
985 bzero(&tfi, sizeof(tfi));
986 tfi.tfi_refcnt = f->tf_fb->tfb_refcnt;
987 tfi.tfi_id = f->tf_fb->tfb_id;
988 (void)strlcpy(tfi.tfi_alias, f->tf_name,
989 sizeof(tfi.tfi_alias));
990 (void)strlcpy(tfi.tfi_name,
991 f->tf_fb->tfb_tcp_block_name, sizeof(tfi.tfi_name));
992 error = SYSCTL_OUT(req, &tfi, sizeof(tfi));
994 * Don't stop on error, as that is the
995 * mechanism we use to accumulate length
996 * information if the buffer was too short.
1000 KASSERT(cnt == tcp_fb_cnt,
1001 ("%s: cnt (%d) != tcp_fb_cnt (%d)", __func__, cnt, tcp_fb_cnt));
1005 rw_runlock(&tcp_function_lock);
1006 if (req->oldptr == NULL)
1007 error = SYSCTL_OUT(req, NULL,
1008 (cnt + 1) * sizeof(struct tcp_function_info));
1013 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, function_info,
1014 CTLTYPE_OPAQUE | CTLFLAG_SKIP | CTLFLAG_RD | CTLFLAG_MPSAFE,
1015 NULL, 0, sysctl_net_inet_list_func_info, "S,tcp_function_info",
1016 "List TCP function block name-to-ID mappings");
1019 * tfb_tcp_handoff_ok() function for the default stack.
1020 * Note that we'll basically try to take all comers.
1023 tcp_default_handoff_ok(struct tcpcb *tp)
1030 * tfb_tcp_fb_init() function for the default stack.
1032 * This handles making sure we have appropriate timers set if you are
1033 * transitioning a socket that has some amount of setup done.
1035 * The init() function from the default can *never* return non-zero, i.e.,
1036 * it is required to always succeed since it is the stack of last resort!
1039 tcp_default_fb_init(struct tcpcb *tp)
1044 INP_WLOCK_ASSERT(tp->t_inpcb);
1046 KASSERT(tp->t_state >= 0 && tp->t_state < TCPS_TIME_WAIT,
1047 ("%s: connection %p in unexpected state %d", __func__, tp,
1051 * Nothing to do for ESTABLISHED or LISTEN states. And, we don't
1052 * know what to do for unexpected states (which includes TIME_WAIT).
1054 if (tp->t_state <= TCPS_LISTEN || tp->t_state >= TCPS_TIME_WAIT)
1058 * Make sure some kind of transmission timer is set if there is
1061 so = tp->t_inpcb->inp_socket;
1062 if ((!TCPS_HAVEESTABLISHED(tp->t_state) || sbavail(&so->so_snd) ||
1063 tp->snd_una != tp->snd_max) && !(tcp_timer_active(tp, TT_REXMT) ||
1064 tcp_timer_active(tp, TT_PERSIST))) {
1066 * If the session has been established and it looks like it should
1067 * be in the persist state, set the persist timer. Otherwise,
1068 * set the retransmit timer.
1070 if (TCPS_HAVEESTABLISHED(tp->t_state) && tp->snd_wnd == 0 &&
1071 (int32_t)(tp->snd_nxt - tp->snd_una) <
1072 (int32_t)sbavail(&so->so_snd))
1075 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
1078 /* All non-embryonic sessions get a keepalive timer. */
1079 if (!tcp_timer_active(tp, TT_KEEP))
1080 tcp_timer_activate(tp, TT_KEEP,
1081 TCPS_HAVEESTABLISHED(tp->t_state) ? TP_KEEPIDLE(tp) :
1085 * Make sure critical variables are initialized
1086 * if transitioning while in Recovery.
1088 if (IN_FASTRECOVERY(tp->t_flags)) {
1089 if (tp->sackhint.recover_fs == 0)
1090 tp->sackhint.recover_fs = max(1,
1091 tp->snd_nxt - tp->snd_una);
1098 * tfb_tcp_fb_fini() function for the default stack.
1100 * This changes state as necessary (or prudent) to prepare for another stack
1101 * to assume responsibility for the connection.
1104 tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged)
1107 INP_WLOCK_ASSERT(tp->t_inpcb);
1112 * Target size of TCP PCB hash tables. Must be a power of two.
1114 * Note that this can be overridden by the kernel environment
1115 * variable net.inet.tcp.tcbhashsize
1118 #define TCBHASHSIZE 0
1123 * Callouts should be moved into struct tcp directly. They are currently
1124 * separate because the tcpcb structure is exported to userland for sysctl
1125 * parsing purposes, which do not know about callouts.
1129 struct tcp_timer tt;
1136 VNET_DEFINE_STATIC(uma_zone_t, tcpcb_zone);
1137 #define V_tcpcb_zone VNET(tcpcb_zone)
1139 MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers");
1140 MALLOC_DEFINE(M_TCPFUNCTIONS, "tcpfunc", "TCP function set memory");
1142 static struct mtx isn_mtx;
1144 #define ISN_LOCK_INIT() mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF)
1145 #define ISN_LOCK() mtx_lock(&isn_mtx)
1146 #define ISN_UNLOCK() mtx_unlock(&isn_mtx)
1148 INPCBSTORAGE_DEFINE(tcpcbstor, "tcpinp", "tcp_inpcb", "tcp", "tcphash");
1151 * Take a value and get the next power of 2 that doesn't overflow.
1152 * Used to size the tcp_inpcb hash buckets.
1155 maketcp_hashsize(int size)
1161 * get the next power of 2 higher than maxsockets.
1163 hashsize = 1 << fls(size);
1164 /* catch overflow, and just go one power of 2 smaller */
1165 if (hashsize < size) {
1166 hashsize = 1 << (fls(size) - 1);
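/*
 * For example, maketcp_hashsize(600) computes 1 << fls(600) = 1 << 10 =
 * 1024; the fallback to 1 << (fls(size) - 1) above is only taken when
 * the first shift overflows for very large inputs.
 */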
1171 static volatile int next_tcp_stack_id = 1;
1174 * Register a TCP function block with the name provided in the names
1175 * array. (Note that this function does NOT automatically register
1176 * blk->tfb_tcp_block_name as a stack name. Therefore, you should
1177 * explicitly include blk->tfb_tcp_block_name in the list of names if
1178 * you wish to register the stack with that name.)
1180 * Either all name registrations will succeed or all will fail. If
1181 * a name registration fails, the function will update the num_names
1182 * argument to point to the array index of the name that encountered
1185 * Returns 0 on success, or an error code on failure.
1188 register_tcp_functions_as_names(struct tcp_function_block *blk, int wait,
1189 const char *names[], int *num_names)
1191 struct tcp_function *n;
1192 struct tcp_function_set fs;
1195 KASSERT(names != NULL && *num_names > 0,
1196 ("%s: Called with 0-length name list", __func__));
1197 KASSERT(names != NULL, ("%s: Called with NULL name list", __func__));
1198 KASSERT(rw_initialized(&tcp_function_lock),
1199 ("%s: called too early", __func__));
1201 if ((blk->tfb_tcp_output == NULL) ||
1202 (blk->tfb_tcp_do_segment == NULL) ||
1203 (blk->tfb_tcp_ctloutput == NULL) ||
1204 (strlen(blk->tfb_tcp_block_name) == 0)) {
1206 * These functions are required and you
1212 if (blk->tfb_tcp_timer_stop_all ||
1213 blk->tfb_tcp_timer_activate ||
1214 blk->tfb_tcp_timer_active ||
1215 blk->tfb_tcp_timer_stop) {
1217 * If you define one timer function you
1218 * must have them all.
1220 if ((blk->tfb_tcp_timer_stop_all == NULL) ||
1221 (blk->tfb_tcp_timer_activate == NULL) ||
1222 (blk->tfb_tcp_timer_active == NULL) ||
1223 (blk->tfb_tcp_timer_stop == NULL)) {
1229 if (blk->tfb_flags & TCP_FUNC_BEING_REMOVED) {
1234 refcount_init(&blk->tfb_refcnt, 0);
1235 blk->tfb_id = atomic_fetchadd_int(&next_tcp_stack_id, 1);
1236 for (i = 0; i < *num_names; i++) {
1237 n = malloc(sizeof(struct tcp_function), M_TCPFUNCTIONS, wait);
1244 (void)strlcpy(fs.function_set_name, names[i],
1245 sizeof(fs.function_set_name));
1246 rw_wlock(&tcp_function_lock);
1247 if (find_tcp_functions_locked(&fs) != NULL) {
1248 /* Duplicate name space not allowed */
1249 rw_wunlock(&tcp_function_lock);
1250 free(n, M_TCPFUNCTIONS);
1254 (void)strlcpy(n->tf_name, names[i], sizeof(n->tf_name));
1255 TAILQ_INSERT_TAIL(&t_functions, n, tf_next);
1257 rw_wunlock(&tcp_function_lock);
1263 * Deregister the names we just added. Because registration failed
1264 * for names[i], we don't need to deregister that name.
1267 rw_wlock(&tcp_function_lock);
1269 TAILQ_FOREACH(n, &t_functions, tf_next) {
1270 if (!strncmp(n->tf_name, names[i],
1271 TCP_FUNCTION_NAME_LEN_MAX)) {
1272 TAILQ_REMOVE(&t_functions, n, tf_next);
1275 free(n, M_TCPFUNCTIONS);
1280 rw_wunlock(&tcp_function_lock);
1285 * Register a TCP function block using the name provided in the name
1288 * Returns 0 on success, or an error code on failure.
1291 register_tcp_functions_as_name(struct tcp_function_block *blk, const char *name,
1294 const char *name_list[1];
1299 name_list[0] = name;
1301 name_list[0] = blk->tfb_tcp_block_name;
1302 rv = register_tcp_functions_as_names(blk, wait, name_list, &num_names);
1307 * Register a TCP function block using the name defined in
1308 * blk->tfb_tcp_block_name.
1310 * Returns 0 on success, or an error code on failure.
1313 register_tcp_functions(struct tcp_function_block *blk, int wait)
1316 return (register_tcp_functions_as_name(blk, NULL, wait));
1320 * Deregister all names associated with a function block. This
1321 * functionally removes the function block from use within the system.
1323 * When called with a true quiesce argument, mark the function block
1324 * as being removed so no more stacks will use it and determine
1325 * whether the removal would succeed.
1327 * When called with a false quiesce argument, actually attempt the
1330 * When called with a force argument, attempt to switch all TCBs to
1331 * use the default stack instead of returning EBUSY.
1333 * Returns 0 on success (or if the removal would succeed, or an error
1337 deregister_tcp_functions(struct tcp_function_block *blk, bool quiesce,
1340 struct tcp_function *f;
1342 if (blk == &tcp_def_funcblk) {
1343 /* You can't un-register the default */
1346 rw_wlock(&tcp_function_lock);
1347 if (blk == tcp_func_set_ptr) {
1348 /* You can't free the current default */
1349 rw_wunlock(&tcp_function_lock);
1352 /* Mark the block so no more stacks can use it. */
1353 blk->tfb_flags |= TCP_FUNC_BEING_REMOVED;
1355 * If TCBs are still attached to the stack, attempt to switch them
1356 * to the default stack.
1358 if (force && blk->tfb_refcnt) {
1359 struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_tcbinfo,
1360 INPLOOKUP_WLOCKPCB);
1363 VNET_ITERATOR_DECL(vnet_iter);
1365 rw_wunlock(&tcp_function_lock);
1368 VNET_FOREACH(vnet_iter) {
1369 CURVNET_SET(vnet_iter);
1370 while ((inp = inp_next(&inpi)) != NULL) {
1371 if (inp->inp_flags & INP_TIMEWAIT)
1373 tp = intotcpcb(inp);
1374 if (tp == NULL || tp->t_fb != blk)
1376 tcp_switch_back_to_default(tp);
1380 VNET_LIST_RUNLOCK();
1382 rw_wlock(&tcp_function_lock);
1384 if (blk->tfb_refcnt) {
1385 /* TCBs still attached. */
1386 rw_wunlock(&tcp_function_lock);
1391 rw_wunlock(&tcp_function_lock);
1394 /* Remove any function names that map to this function block. */
1395 while (find_tcp_fb_locked(blk, &f) != NULL) {
1396 TAILQ_REMOVE(&t_functions, f, tf_next);
1399 free(f, M_TCPFUNCTIONS);
1401 rw_wunlock(&tcp_function_lock);
1408 struct epoch_tracker et;
1409 VNET_ITERATOR_DECL(vnet_iter);
1414 NET_EPOCH_ENTER(et);
1415 VNET_LIST_RLOCK_NOSLEEP();
1416 VNET_FOREACH(vnet_iter) {
1417 CURVNET_SET(vnet_iter);
1418 struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_tcbinfo,
1419 INPLOOKUP_WLOCKPCB);
1424 * Walk the tcpcbs, if existing, and flush the reassembly queue,
1425 * if there is one...
1426 * XXX: The "Net/3" implementation doesn't imply that the TCP
1427 * reassembly queue should be flushed, but in a situation
1428 * where we're really low on mbufs, this is potentially
1431 while ((inpb = inp_next(&inpi)) != NULL) {
1432 if (inpb->inp_flags & INP_TIMEWAIT)
1434 if ((tcpb = intotcpcb(inpb)) != NULL) {
1435 tcp_reass_flush(tcpb);
1436 tcp_clean_sackreport(tcpb);
1438 tcp_log_drain(tcpb);
1441 if (tcp_pcap_aggressive_free) {
1442 /* Free the TCP PCAP queues. */
1443 tcp_pcap_drain(&(tcpb->t_inpkts));
1444 tcp_pcap_drain(&(tcpb->t_outpkts));
1451 VNET_LIST_RUNLOCK_NOSLEEP();
1456 tcp_vnet_init(void *arg __unused)
1460 if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN,
1461 &V_tcp_hhh[HHOOK_TCP_EST_IN], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
1462 printf("%s: WARNING: unable to register helper hook\n", __func__);
1463 if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT,
1464 &V_tcp_hhh[HHOOK_TCP_EST_OUT], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
1465 printf("%s: WARNING: unable to register helper hook\n", __func__);
1468 if (tcp_stats_init())
1469 printf("%s: WARNING: unable to initialise TCP stats\n",
1472 in_pcbinfo_init(&V_tcbinfo, &tcpcbstor, tcp_tcbhashsize,
1476 * These have to be type stable for the benefit of the timers.
1478 V_tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
1479 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1480 uma_zone_set_max(V_tcpcb_zone, maxsockets);
1481 uma_zone_set_warning(V_tcpcb_zone, "kern.ipc.maxsockets limit reached");
1487 TUNABLE_INT_FETCH("net.inet.tcp.sack.enable", &V_tcp_do_sack);
1488 V_sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
1489 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1491 tcp_fastopen_init();
1493 COUNTER_ARRAY_ALLOC(V_tcps_states, TCP_NSTATES, M_WAITOK);
1494 VNET_PCPUSTAT_ALLOC(tcpstat, M_WAITOK);
1496 V_tcp_msl = TCPTV_MSL;
1498 VNET_SYSINIT(tcp_vnet_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH,
1499 tcp_vnet_init, NULL);
1502 tcp_init(void *arg __unused)
1504 const char *tcbhash_tuneable;
1507 tcp_reass_global_init();
1509 /* XXX virtualize those below? */
1510 tcp_delacktime = TCPTV_DELACK;
1511 tcp_keepinit = TCPTV_KEEP_INIT;
1512 tcp_keepidle = TCPTV_KEEP_IDLE;
1513 tcp_keepintvl = TCPTV_KEEPINTVL;
1514 tcp_maxpersistidle = TCPTV_KEEP_IDLE;
1515 tcp_rexmit_initial = TCPTV_RTOBASE;
1516 if (tcp_rexmit_initial < 1)
1517 tcp_rexmit_initial = 1;
1518 tcp_rexmit_min = TCPTV_MIN;
1519 if (tcp_rexmit_min < 1)
1521 tcp_persmin = TCPTV_PERSMIN;
1522 tcp_persmax = TCPTV_PERSMAX;
1523 tcp_rexmit_slop = TCPTV_CPU_VAR;
1524 tcp_finwait2_timeout = TCPTV_FINWAIT2_TIMEOUT;
1526 /* Setup the tcp function block list */
1527 TAILQ_INIT(&t_functions);
1528 rw_init(&tcp_function_lock, "tcp_func_lock");
1529 register_tcp_functions(&tcp_def_funcblk, M_WAITOK);
1531 /* Initialize the TCP logging data. */
1534 arc4rand(&V_ts_offset_secret, sizeof(V_ts_offset_secret), 0);
1536 if (tcp_soreceive_stream) {
1538 tcp_protosw.pr_soreceive = soreceive_stream;
1541 tcp6_protosw.pr_soreceive = soreceive_stream;
1546 max_protohdr_grow(sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
1548 max_protohdr_grow(sizeof(struct tcpiphdr));
1552 EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
1553 SHUTDOWN_PRI_DEFAULT);
1554 EVENTHANDLER_REGISTER(vm_lowmem, tcp_drain, NULL, LOWMEM_PRI_DEFAULT);
1555 EVENTHANDLER_REGISTER(mbuf_lowmem, tcp_drain, NULL, LOWMEM_PRI_DEFAULT);
1557 tcp_inp_lro_direct_queue = counter_u64_alloc(M_WAITOK);
1558 tcp_inp_lro_wokeup_queue = counter_u64_alloc(M_WAITOK);
1559 tcp_inp_lro_compressed = counter_u64_alloc(M_WAITOK);
1560 tcp_inp_lro_locks_taken = counter_u64_alloc(M_WAITOK);
1561 tcp_extra_mbuf = counter_u64_alloc(M_WAITOK);
1562 tcp_would_have_but = counter_u64_alloc(M_WAITOK);
1563 tcp_comp_total = counter_u64_alloc(M_WAITOK);
1564 tcp_uncomp_total = counter_u64_alloc(M_WAITOK);
1565 tcp_bad_csums = counter_u64_alloc(M_WAITOK);
1570 hashsize = TCBHASHSIZE;
1571 tcbhash_tuneable = "net.inet.tcp.tcbhashsize";
1572 TUNABLE_INT_FETCH(tcbhash_tuneable, &hashsize);
1573 if (hashsize == 0) {
1575 * Auto tune the hash size based on maxsockets.
1576 * A perfect hash would have a 1:1 mapping
1577 * (hashsize = maxsockets); however, it's been
1578 * suggested that O(2) average is better.
1580 hashsize = maketcp_hashsize(maxsockets / 4);
1582 * Our historical default is 512,
1583 * do not autotune lower than this.
1588 printf("%s: %s auto tuned to %d\n", __func__,
1589 tcbhash_tuneable, hashsize);
1592 * We require a hashsize to be a power of two.
1593 * Previously if it was not a power of two we would just reset it
1594 * back to 512, which could be a nasty surprise if you did not notice
1595 * the error message.
1596 * Instead what we do is clip it to the closest power of two lower
1597 * than the specified hash value.
1599 if (!powerof2(hashsize)) {
1600 int oldhashsize = hashsize;
1602 hashsize = maketcp_hashsize(hashsize);
1603 /* prevent absurdly low value */
1606 printf("%s: WARNING: TCB hash size not a power of 2, "
1607 "clipped from %d to %d.\n", __func__, oldhashsize,
1610 tcp_tcbhashsize = hashsize;
1613 IPPROTO_REGISTER(IPPROTO_TCP, tcp_input, tcp_ctlinput);
1616 IP6PROTO_REGISTER(IPPROTO_TCP, tcp6_input, tcp6_ctlinput);
1619 SYSINIT(tcp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, tcp_init, NULL);
1623 tcp_destroy(void *unused __unused)
1631 * All our processes are gone, all our sockets should be cleaned
1632 * up, which means we should be past the tcp_discardcb() calls.
1633 * Sleep to let all tcpcb timers really disappear and clean up.
1636 INP_INFO_WLOCK(&V_tcbinfo);
1637 n = V_tcbinfo.ipi_count;
1638 INP_INFO_WUNLOCK(&V_tcbinfo);
1641 pause("tcpdes", hz / 10);
1646 in_pcbinfo_destroy(&V_tcbinfo);
1647 /* tcp_discardcb() clears the sack_holes up. */
1648 uma_zdestroy(V_sack_hole_zone);
1649 uma_zdestroy(V_tcpcb_zone);
1652 * Cannot free the zone until all tcpcbs are released as we attach
1653 * the allocations to them.
1655 tcp_fastopen_destroy();
1657 COUNTER_ARRAY_FREE(V_tcps_states, TCP_NSTATES);
1658 VNET_PCPUSTAT_FREE(tcpstat);
1661 error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_IN]);
1663 printf("%s: WARNING: unable to deregister helper hook "
1664 "type=%d, id=%d: error %d returned\n", __func__,
1665 HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN, error);
1667 error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_OUT]);
1669 printf("%s: WARNING: unable to deregister helper hook "
1670 "type=%d, id=%d: error %d returned\n", __func__,
1671 HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT, error);
1675 VNET_SYSUNINIT(tcp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, tcp_destroy, NULL);
1685 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
1686 * tcp_template used to store this data in mbufs, but we now recopy it out
1687 * of the tcpcb each time to conserve mbufs.
1690 tcpip_fillheaders(struct inpcb *inp, uint16_t port, void *ip_ptr, void *tcp_ptr)
1692 struct tcphdr *th = (struct tcphdr *)tcp_ptr;
1694 INP_WLOCK_ASSERT(inp);
1697 if ((inp->inp_vflag & INP_IPV6) != 0) {
1698 struct ip6_hdr *ip6;
1700 ip6 = (struct ip6_hdr *)ip_ptr;
1701 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
1702 (inp->inp_flow & IPV6_FLOWINFO_MASK);
1703 ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
1704 (IPV6_VERSION & IPV6_VERSION_MASK);
1706 ip6->ip6_nxt = IPPROTO_TCP;
1708 ip6->ip6_nxt = IPPROTO_UDP;
1709 ip6->ip6_plen = htons(sizeof(struct tcphdr));
1710 ip6->ip6_src = inp->in6p_laddr;
1711 ip6->ip6_dst = inp->in6p_faddr;
1714 #if defined(INET6) && defined(INET)
1721 ip = (struct ip *)ip_ptr;
1722 ip->ip_v = IPVERSION;
1724 ip->ip_tos = inp->inp_ip_tos;
1728 ip->ip_ttl = inp->inp_ip_ttl;
1731 ip->ip_p = IPPROTO_TCP;
1733 ip->ip_p = IPPROTO_UDP;
1734 ip->ip_src = inp->inp_laddr;
1735 ip->ip_dst = inp->inp_faddr;
1738 th->th_sport = inp->inp_lport;
1739 th->th_dport = inp->inp_fport;
1743 tcp_set_flags(th, 0);
1746 th->th_sum = 0; /* in_pseudo() is called later for ipv4 */
1750 * Create template to be used to send tcp packets on a connection.
1751 * Allocates an mbuf and fills in a skeletal tcp/ip header. The only
1752 * use for this function is in keepalives, which use tcp_respond.
1755 tcpip_maketemplate(struct inpcb *inp)
1759 t = malloc(sizeof(*t), M_TEMP, M_NOWAIT);
1762 tcpip_fillheaders(inp, 0, (void *)&t->tt_ipgen, (void *)&t->tt_t);
1767 * Send a single message to the TCP at address specified by
1768 * the given TCP/IP header. If m == NULL, then we make a copy
1769 * of the tcpiphdr at th and send directly to the addressed host.
1770 * This is used to force keep alive messages out using the TCP
1771 * template for a connection. If flags are given then we send
1772 * a message back to the TCP which originated the segment th,
1773 * and discard the mbuf containing it and any other attached mbufs.
1775 * In any case the ack and sequence number of the transmitted
1776 * segment are as specified by the parameters.
1778 * NOTE: If m != NULL, then th must point to *inside* the mbuf.
1781 tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
1782 tcp_seq ack, tcp_seq seq, int flags)
1788 struct udphdr *uh = NULL;
1790 struct tcp_log_buffer *lgb;
1793 struct ip6_hdr *ip6;
1796 int optlen, tlen, win, ulen;
1801 int thflags = tcp_get_flags(th);
1804 KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));
1808 isipv6 = ((struct ip *)ipgen)->ip_v == (IPV6_VERSION >> 4);
1815 KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
1816 INP_LOCK_ASSERT(inp);
1822 if (isipv6 && ip6 && (ip6->ip6_nxt == IPPROTO_UDP))
1823 port = m->m_pkthdr.tcp_tun_port;
1826 if (ip && (ip->ip_p == IPPROTO_UDP))
1827 port = m->m_pkthdr.tcp_tun_port;
1836 if (!(flags & TH_RST)) {
1837 win = sbspace(&inp->inp_socket->so_rcv);
1838 if (win > TCP_MAXWIN << tp->rcv_scale)
1839 win = TCP_MAXWIN << tp->rcv_scale;
1841 if ((tp->t_flags & TF_NOOPT) == 0)
1845 m = m_gethdr(M_NOWAIT, MT_DATA);
1848 m->m_data += max_linkhdr;
1851 bcopy((caddr_t)ip6, mtod(m, caddr_t),
1852 sizeof(struct ip6_hdr));
1853 ip6 = mtod(m, struct ip6_hdr *);
1854 nth = (struct tcphdr *)(ip6 + 1);
1856 /* Insert a UDP header */
1857 uh = (struct udphdr *)nth;
1858 uh->uh_sport = htons(V_tcp_udp_tunneling_port);
1859 uh->uh_dport = port;
1860 nth = (struct tcphdr *)(uh + 1);
1865 bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
1866 ip = mtod(m, struct ip *);
1867 nth = (struct tcphdr *)(ip + 1);
1869 /* Insert a UDP header */
1870 uh = (struct udphdr *)nth;
1871 uh->uh_sport = htons(V_tcp_udp_tunneling_port);
1872 uh->uh_dport = port;
1873 nth = (struct tcphdr *)(uh + 1);
1876 bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
1878 } else if ((!M_WRITABLE(m)) || (port != 0)) {
1881 /* Can't reuse 'm', allocate a new mbuf. */
1882 n = m_gethdr(M_NOWAIT, MT_DATA);
1888 if (!m_dup_pkthdr(n, m, M_NOWAIT)) {
1894 n->m_data += max_linkhdr;
1895 /* m_len is set later */
1896 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
1899 bcopy((caddr_t)ip6, mtod(n, caddr_t),
1900 sizeof(struct ip6_hdr));
1901 ip6 = mtod(n, struct ip6_hdr *);
1902 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
1903 nth = (struct tcphdr *)(ip6 + 1);
1905 /* Insert a UDP header */
1906 uh = (struct udphdr *)nth;
1907 uh->uh_sport = htons(V_tcp_udp_tunneling_port);
1908 uh->uh_dport = port;
1909 nth = (struct tcphdr *)(uh + 1);
1914 bcopy((caddr_t)ip, mtod(n, caddr_t), sizeof(struct ip));
1915 ip = mtod(n, struct ip *);
1916 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
1917 nth = (struct tcphdr *)(ip + 1);
1919 /* Insert a UDP header */
1920 uh = (struct udphdr *)nth;
1921 uh->uh_sport = htons(V_tcp_udp_tunneling_port);
1922 uh->uh_dport = port;
1923 nth = (struct tcphdr *)(uh + 1);
1926 bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
1927 xchg(nth->th_dport, nth->th_sport, uint16_t);
1934 * XXX MRT We inherit the FIB, which is lucky.
1938 m->m_data = (caddr_t)ipgen;
1939 /* m_len is set later */
1942 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
1943 nth = (struct tcphdr *)(ip6 + 1);
1947 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
1948 nth = (struct tcphdr *)(ip + 1);
1952 * this is usually a case when an extension header
1953 * exists between the IPv6 header and the
1956 nth->th_sport = th->th_sport;
1957 nth->th_dport = th->th_dport;
1959 xchg(nth->th_dport, nth->th_sport, uint16_t);
1965 tlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
1967 #if defined(INET) && defined(INET6)
1971 tlen = sizeof (struct tcpiphdr);
1974 tlen += sizeof (struct udphdr);
1977 KASSERT(M_TRAILINGSPACE(m) >= tlen,
1978 ("Not enough trailing space for message (m=%p, need=%d, have=%ld)",
1979 m, tlen, (long)M_TRAILINGSPACE(m)));
1984 /* Make sure we have room. */
1985 if (M_TRAILINGSPACE(m) < TCP_MAXOLEN) {
1986 m->m_next = m_get(M_NOWAIT, MT_DATA);
1988 optp = mtod(m->m_next, u_char *);
1993 optp = (u_char *) (nth + 1);
1999 if (tp->t_flags & TF_RCVD_TSTMP) {
2000 to.to_tsval = tcp_ts_getticks() + tp->ts_offset;
2001 to.to_tsecr = tp->ts_recent;
2002 to.to_flags |= TOF_TS;
2004 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
2005 /* TCP-MD5 (RFC2385). */
2006 if (tp->t_flags & TF_SIGNATURE)
2007 to.to_flags |= TOF_SIGNATURE;
2009 /* Add the options. */
2010 tlen += optlen = tcp_addoptions(&to, optp);
2012 /* Update m_len in the correct mbuf. */
2013 optm->m_len += optlen;
2019 ulen = tlen - sizeof(struct ip6_hdr);
2020 uh->uh_ulen = htons(ulen);
2023 ip6->ip6_vfc = IPV6_VERSION;
2025 ip6->ip6_nxt = IPPROTO_UDP;
2027 ip6->ip6_nxt = IPPROTO_TCP;
2028 ip6->ip6_plen = htons(tlen - sizeof(*ip6));
2031 #if defined(INET) && defined(INET6)
2037 ulen = tlen - sizeof(struct ip);
2038 uh->uh_ulen = htons(ulen);
2040 ip->ip_len = htons(tlen);
2041 ip->ip_ttl = V_ip_defttl;
2043 ip->ip_p = IPPROTO_UDP;
2045 ip->ip_p = IPPROTO_TCP;
2047 if (V_path_mtu_discovery)
2048 ip->ip_off |= htons(IP_DF);
2051 m->m_pkthdr.len = tlen;
2052 m->m_pkthdr.rcvif = NULL;
2056 * Packet is associated with a socket, so allow the
2057 * label of the response to reflect the socket label.
2059 INP_LOCK_ASSERT(inp);
2060 mac_inpcb_create_mbuf(inp, m);
2063 * Packet is not associated with a socket, so possibly
2064 * update the label in place.
2066 mac_netinet_tcp_reply(m);
2069 nth->th_seq = htonl(seq);
2070 nth->th_ack = htonl(ack);
2071 nth->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
2072 tcp_set_flags(nth, flags);
2074 nth->th_win = htons((u_short) (win >> tp->rcv_scale));
2076 nth->th_win = htons((u_short)win);
2079 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
2080 if (to.to_flags & TOF_SIGNATURE) {
2081 if (!TCPMD5_ENABLED() ||
2082 TCPMD5_OUTPUT(m, nth, to.to_signature) != 0) {
2092 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
2093 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
2094 uh->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
2097 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
2098 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
2099 nth->th_sum = in6_cksum_pseudo(ip6,
2100 tlen - sizeof(struct ip6_hdr), IPPROTO_TCP, 0);
2102 ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb :
2106 #if defined(INET6) && defined(INET)
2112 uh->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
2113 htons(ulen + IPPROTO_UDP));
2114 m->m_pkthdr.csum_flags = CSUM_UDP;
2115 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
2118 m->m_pkthdr.csum_flags = CSUM_TCP;
2119 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
2120 nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
2121 htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
2126 if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
2127 tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
2129 TCP_PROBE3(debug__output, tp, th, m);
2131 TCP_PROBE5(accept__refused, NULL, NULL, m, tp, nth);
2133 if ((tp != NULL) && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
2134 if (INP_WLOCKED(inp)) {
2135 union tcp_log_stackspecific log;
2138 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2139 log.u_bbr.inhpts = tp->t_inpcb->inp_in_hpts;
2140 log.u_bbr.flex8 = 4;
2141 log.u_bbr.pkts_out = tp->t_maxseg;
2142 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2143 log.u_bbr.delivered = 0;
2144 lgb = tcp_log_event_(tp, nth, NULL, NULL, TCP_LOG_OUT,
2145 ERRNO_UNK, 0, &log, false, NULL, NULL, 0, &tv);
2148 * We can not log the packet, since we only own the
2149 * read lock, but a write lock is needed. The read lock
2150 * is not upgraded to a write lock, since only getting
2151 * the read lock was done intentionally to improve the
2152 * handling of SYN flooding attacks.
2153 * This happens only for pure SYN segments received in
2154 * the initial CLOSED state, or received in a more
2155 * advanced state than listen and the UDP encapsulation
2156 * port is unexpected.
2157 * The incoming SYN segments do not really belong to
2158 * the TCP connection and the handling does not change
2159 * the state of the TCP connection. Therefore, the
2160 * sending of the RST segments is not logged. Please
2161 * note that also the incoming SYN segments are not
2164 * The following code ensures that the above description
2165 * is and stays correct.
2167 KASSERT((thflags & (TH_ACK|TH_SYN)) == TH_SYN &&
2168 (tp->t_state == TCPS_CLOSED ||
2169 (tp->t_state > TCPS_LISTEN && tp->t_port != port)),
2170 ("%s: Logging of TCP segment with flags 0x%b and "
2171 "UDP encapsulation port %u skipped in state %s",
2172 __func__, thflags, PRINT_TH_FLAGS,
2173 ntohs(port), tcpstates[tp->t_state]));
2178 TCPSTAT_INC(tcps_sndacks);
2179 else if (flags & (TH_SYN|TH_FIN|TH_RST))
2180 TCPSTAT_INC(tcps_sndctrl);
2181 TCPSTAT_INC(tcps_sndtotal);
2185 TCP_PROBE5(send, NULL, tp, ip6, tp, nth);
2186 output_ret = ip6_output(m, NULL, NULL, 0, NULL, NULL, inp);
2189 #if defined(INET) && defined(INET6)
2194 TCP_PROBE5(send, NULL, tp, ip, tp, nth);
2195 output_ret = ip_output(m, NULL, NULL, 0, NULL, inp);
2199 lgb->tlb_errno = output_ret;
2203 * Create a new TCP control block, making an
2204 * empty reassembly queue and hooking it to the argument
2205 * protocol control block. The `inp' parameter must have
2206 * come from the zone allocator set up in tcp_init().
2209 tcp_newtcpcb(struct inpcb *inp)
2211 struct tcpcb_mem *tm;
2214 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
2217 tm = uma_zalloc(V_tcpcb_zone, M_NOWAIT | M_ZERO);
2222 /* Initialise cc_var struct for this tcpcb. */
2224 tp->ccv->type = IPPROTO_TCP;
2225 tp->ccv->ccvc.tcp = tp;
2226 rw_rlock(&tcp_function_lock);
2227 tp->t_fb = tcp_func_set_ptr;
2228 refcount_acquire(&tp->t_fb->tfb_refcnt);
2229 rw_runlock(&tcp_function_lock);
2231 * Use the current system default CC algorithm.
2233 cc_attach(tp, CC_DEFAULT_ALGO());
2236 * The tcpcb will hold a reference on its inpcb until tcp_discardcb()
2239 in_pcbref(inp); /* Reference for tcpcb */
2242 if (CC_ALGO(tp)->cb_init != NULL)
2243 if (CC_ALGO(tp)->cb_init(tp->ccv, NULL) > 0) {
2245 if (tp->t_fb->tfb_tcp_fb_fini)
2246 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
2247 in_pcbrele_wlocked(inp);
2248 refcount_release(&tp->t_fb->tfb_refcnt);
2249 uma_zfree(V_tcpcb_zone, tm);
2255 if (khelp_init_osd(HELPER_CLASS_TCP, tp->osd)) {
2256 if (tp->t_fb->tfb_tcp_fb_fini)
2257 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
2258 in_pcbrele_wlocked(inp);
2259 refcount_release(&tp->t_fb->tfb_refcnt);
2260 uma_zfree(V_tcpcb_zone, tm);
2266 tp->t_vnet = inp->inp_vnet;
2268 tp->t_timers = &tm->tt;
2269 TAILQ_INIT(&tp->t_segq);
2272 isipv6 ? V_tcp_v6mssdflt :
2276 /* Set up our timeouts. */
2277 callout_init(&tp->t_timers->tt_rexmt, 1);
2278 callout_init(&tp->t_timers->tt_persist, 1);
2279 callout_init(&tp->t_timers->tt_keep, 1);
2280 callout_init(&tp->t_timers->tt_2msl, 1);
2281 callout_init(&tp->t_timers->tt_delack, 1);
2283 if (V_tcp_do_rfc1323)
2284 tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
2286 tp->t_flags |= TF_SACK_PERMIT;
2287 TAILQ_INIT(&tp->snd_holes);
2290 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
2291 * rtt estimate. Set rttvar so that srtt + 4 * rttvar gives
2292 * reasonable initial retransmit time.
2294 tp->t_srtt = TCPTV_SRTTBASE;
2295 tp->t_rttvar = ((tcp_rexmit_initial - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
2296 tp->t_rttmin = tcp_rexmit_min;
2297 tp->t_rxtcur = tcp_rexmit_initial;
2298 tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
2299 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
2300 tp->t_rcvtime = ticks;
2302 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
2303 * because the socket may be bound to an IPv6 wildcard address,
2304 * which may match an IPv4-mapped IPv6 address.
2306 inp->inp_ip_ttl = V_ip_defttl;
2310 * Init the TCP PCAP queues.
2312 tcp_pcap_tcpcb_init(tp);
2315 /* Initialize the per-TCPCB log data. */
2316 tcp_log_tcpcbinit(tp);
2318 tp->t_pacing_rate = -1;
2319 if (tp->t_fb->tfb_tcp_fb_init) {
2320 if ((*tp->t_fb->tfb_tcp_fb_init)(tp)) {
2321 refcount_release(&tp->t_fb->tfb_refcnt);
2322 in_pcbrele_wlocked(inp);
2323 uma_zfree(V_tcpcb_zone, tm);
2328 if (V_tcp_perconn_stats_enable == 1)
2329 tp->t_stats = stats_blob_alloc(V_tcp_perconn_stats_dflt_tpl, 0);
2332 tp->t_flags |= TF_LRD;
2333 return (tp); /* XXX */
2337 * Drop a TCP connection, reporting
2338 * the specified error. If connection is synchronized,
2339 * then send a RST to peer.
2342 tcp_drop(struct tcpcb *tp, int errno)
2344 struct socket *so = tp->t_inpcb->inp_socket;
2347 INP_WLOCK_ASSERT(tp->t_inpcb);
2349 if (TCPS_HAVERCVDSYN(tp->t_state)) {
2350 tcp_state_change(tp, TCPS_CLOSED);
2351 /* Don't use tcp_output() here due to possible recursion. */
2352 (void)tcp_output_nodrop(tp);
2353 TCPSTAT_INC(tcps_drops);
2355 TCPSTAT_INC(tcps_conndrops);
2356 if (errno == ETIMEDOUT && tp->t_softerror)
2357 errno = tp->t_softerror;
2358 so->so_error = errno;
2359 return (tcp_close(tp));
2363 tcp_discardcb(struct tcpcb *tp)
2365 struct inpcb *inp = tp->t_inpcb;
2367 INP_WLOCK_ASSERT(inp);
2370 * Make sure that all of our timers are stopped before we delete the
2373 * control block.  If stopping a timer fails, we schedule a discard
2374 * function in the same callout, and the last discard function
2375 * called will take care of deleting the tcpcb.
2377 tp->t_timers->tt_draincnt = 0;
2378 tcp_timer_stop(tp, TT_REXMT);
2379 tcp_timer_stop(tp, TT_PERSIST);
2380 tcp_timer_stop(tp, TT_KEEP);
2381 tcp_timer_stop(tp, TT_2MSL);
2382 tcp_timer_stop(tp, TT_DELACK);
2383 if (tp->t_fb->tfb_tcp_timer_stop_all) {
2385 * Call the stack's stop-all method.  It should invoke
2386 * the tcp_timer_stop() method for each of the stack's
2387 * function-specific timeouts.  Each of those stops is
2388 * performed via tfb_tcp_timer_stop(), which should use
2389 * the async drain function of the callout system
2390 * (see tcp_var.h).
2392 tp->t_fb->tfb_tcp_timer_stop_all(tp);
2395 /* free the reassembly queue, if any */
2396 tcp_reass_flush(tp);
2399 /* Disconnect offload device, if any. */
2400 if (tp->t_flags & TF_TOE)
2401 tcp_offload_detach(tp);
2404 tcp_free_sackholes(tp);
2407 /* Free the TCP PCAP queues. */
2408 tcp_pcap_drain(&(tp->t_inpkts));
2409 tcp_pcap_drain(&(tp->t_outpkts));
2412 /* Allow the CC algorithm to clean up after itself. */
2413 if (CC_ALGO(tp)->cb_destroy != NULL)
2414 CC_ALGO(tp)->cb_destroy(tp->ccv);
2416 /* Detach from the CC algorithm */
2420 khelp_destroy_osd(tp->osd);
2423 stats_blob_destroy(tp->t_stats);
2427 inp->inp_ppcb = NULL;
2428 if (tp->t_timers->tt_draincnt == 0) {
2429 bool released __diagused;
2431 released = tcp_freecb(tp);
2432 KASSERT(!released, ("%s: inp %p should not have been released "
2433 "here", __func__, inp));
2438 tcp_freecb(struct tcpcb *tp)
2440 struct inpcb *inp = tp->t_inpcb;
2441 struct socket *so = inp->inp_socket;
2443 bool isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
2446 INP_WLOCK_ASSERT(inp);
2447 MPASS(tp->t_timers->tt_draincnt == 0);
2449 /* We own the last reference on tcpcb, let's free it. */
2451 tcp_log_tcpcbfini(tp);
2453 TCPSTATES_DEC(tp->t_state);
2454 if (tp->t_fb->tfb_tcp_fb_fini)
2455 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
2458 * If we got enough samples through the srtt filter,
2459 * save the rtt and rttvar in the TCP host cache.
2460 * 'Enough' is arbitrarily defined as 4 rtt samples.
2461 * Four samples is enough for the srtt filter to converge
2462 * close enough to the correct value; with fewer samples
2463 * we could save a bogus rtt.  The danger is not high,
2464 * as TCP recovers quickly from such errors.
2465 * XXX: Works very well but needs some more statistics!
2467 * XXXRRS: Updating must be after the stack fini() since
2468 * that may be converting some internal representation of
2469 * say srtt etc into the general one used by other stacks.
2470 * Let's also at least protect against so being NULL,
2471 * as RW stated below.
2473 if ((tp->t_rttupdated >= 4) && (so != NULL)) {
2474 struct hc_metrics_lite metrics;
2477 bzero(&metrics, sizeof(metrics));
2479 * Always update the ssthresh when the conditions below
2480 * are satisfied.  This gives us a better starting value
2481 * for congestion avoidance on new connections.
2482 * ssthresh is only set if packet loss occurred on a session.
2484 * XXXRW: 'so' may be NULL here, and/or socket buffer may be
2485 * being torn down. Ideally this code would not use 'so'.
2487 ssthresh = tp->snd_ssthresh;
2488 if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
2490 * Convert the limit from user data bytes to
2491 * packets, then to packet data bytes.
2493 ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
2496 ssthresh *= (tp->t_maxseg +
2498 (isipv6 ? sizeof (struct ip6_hdr) +
2499 sizeof (struct tcphdr) :
2501 sizeof (struct tcpiphdr)
2508 metrics.rmx_ssthresh = ssthresh;
2510 metrics.rmx_rtt = tp->t_srtt;
2511 metrics.rmx_rttvar = tp->t_rttvar;
2512 metrics.rmx_cwnd = tp->snd_cwnd;
2513 metrics.rmx_sendpipe = 0;
2514 metrics.rmx_recvpipe = 0;
2516 tcp_hc_update(&inp->inp_inc, &metrics);
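/*
 * Worked example of the conversion above: with t_maxseg == 1460 and
 * snd_ssthresh == 20000 bytes of user data, the rounding step gives
 * (20000 + 730) / 1460 == 14 packets, and the cached value becomes
 * 14 * (1460 + 40) == 21000 bytes on an IPv4 connection, i.e. the
 * ssthresh is stored in the host cache including protocol headers.
 */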
2519 refcount_release(&tp->t_fb->tfb_refcnt);
2520 uma_zfree(V_tcpcb_zone, tp);
2522 return (in_pcbrele_wlocked(inp));
2526 * Attempt to close a TCP control block, marking it as dropped, and freeing
2527 * the socket if we hold the only reference.
2530 tcp_close(struct tcpcb *tp)
2532 struct inpcb *inp = tp->t_inpcb;
2535 INP_WLOCK_ASSERT(inp);
2538 if (tp->t_state == TCPS_LISTEN)
2539 tcp_offload_listen_stop(tp);
2542 * This releases the TFO pending counter resource for TFO listen
2543 * sockets as well as passively-created TFO sockets that transition
2544 * from SYN_RECEIVED to CLOSED.
2546 if (tp->t_tfo_pending) {
2547 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
2548 tp->t_tfo_pending = NULL;
2551 tcp_hpts_remove(inp);
2554 TCPSTAT_INC(tcps_closed);
2555 if (tp->t_state != TCPS_CLOSED)
2556 tcp_state_change(tp, TCPS_CLOSED);
2557 KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL"));
2558 so = inp->inp_socket;
2559 soisdisconnected(so);
2560 if (inp->inp_flags & INP_SOCKREF) {
2561 inp->inp_flags &= ~INP_SOCKREF;
2570 * Notify a TCP user of an asynchronous error:
2571 * store the error as a soft error on the connection.
2572 * (For now this does nothing more, since one cannot yet select for soft errors.)
2574 * Do not wake up the user, since there currently is no mechanism for
2575 * reporting soft errors (yet - a kqueue filter may be added).
2577 static struct inpcb *
2578 tcp_notify(struct inpcb *inp, int error)
2582 INP_WLOCK_ASSERT(inp);
2584 if ((inp->inp_flags & INP_TIMEWAIT) ||
2585 (inp->inp_flags & INP_DROPPED))
2588 tp = intotcpcb(inp);
2589 KASSERT(tp != NULL, ("tcp_notify: tp == NULL"));
2592 * Ignore some errors if we are hooked up.
2593 * If connection hasn't completed, has retransmitted several times,
2594 * and receives a second error, give up now. This is better
2595 * than waiting a long time to establish a connection that
2596 * can never complete.
2598 if (tp->t_state == TCPS_ESTABLISHED &&
2599 (error == EHOSTUNREACH || error == ENETUNREACH ||
2600 error == EHOSTDOWN)) {
2601 if (inp->inp_route.ro_nh) {
2602 NH_FREE(inp->inp_route.ro_nh);
2603 inp->inp_route.ro_nh = (struct nhop_object *)NULL;
2606 } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
2608 tp = tcp_drop(tp, error);
2614 tp->t_softerror = error;
2618 wakeup( &so->so_timeo);
2625 tcp_pcblist(SYSCTL_HANDLER_ARGS)
2627 struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_tcbinfo,
2628 INPLOOKUP_RLOCKPCB);
2633 if (req->newptr != NULL)
2636 if (req->oldptr == NULL) {
2639 n = V_tcbinfo.ipi_count +
2640 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
2641 n += imax(n / 8, 10);
2642 req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xtcpcb);
2646 if ((error = sysctl_wire_old_buffer(req, 0)) != 0)
2649 bzero(&xig, sizeof(xig));
2650 xig.xig_len = sizeof xig;
2651 xig.xig_count = V_tcbinfo.ipi_count +
2652 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
2653 xig.xig_gen = V_tcbinfo.ipi_gencnt;
2654 xig.xig_sogen = so_gencnt;
2655 error = SYSCTL_OUT(req, &xig, sizeof xig);
2659 error = syncache_pcblist(req);
2663 while ((inp = inp_next(&inpi)) != NULL) {
2664 if (inp->inp_gencnt <= xig.xig_gen) {
2668 * XXX: This use of cr_cansee(), introduced with
2669 * TCP state changes, is not quite right, but for
2670 * now, better than nothing.
2672 if (inp->inp_flags & INP_TIMEWAIT) {
2673 if (intotw(inp) != NULL)
2674 crerr = cr_cansee(req->td->td_ucred,
2675 intotw(inp)->tw_cred);
2677 crerr = EINVAL; /* Skip this inp. */
2679 crerr = cr_canseeinpcb(req->td->td_ucred, inp);
2683 tcp_inptoxtp(inp, &xt);
2684 error = SYSCTL_OUT(req, &xt, sizeof xt);
2696 * Give the user an updated idea of our state.
2697 * If the generation differs from what we told
2698 * her before, she knows that something happened
2699 * while we were processing this request, and it
2700 * might be necessary to retry.
2702 xig.xig_gen = V_tcbinfo.ipi_gencnt;
2703 xig.xig_sogen = so_gencnt;
2704 xig.xig_count = V_tcbinfo.ipi_count +
2705 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
2706 error = SYSCTL_OUT(req, &xig, sizeof xig);
2712 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist,
2713 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2714 NULL, 0, tcp_pcblist, "S,xtcpcb",
2715 "List of active TCP connections");
2719 tcp_getcred(SYSCTL_HANDLER_ARGS)
2722 struct sockaddr_in addrs[2];
2723 struct epoch_tracker et;
2727 error = priv_check(req->td, PRIV_NETINET_GETCRED);
2730 error = SYSCTL_IN(req, addrs, sizeof(addrs));
2733 NET_EPOCH_ENTER(et);
2734 inp = in_pcblookup(&V_tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
2735 addrs[0].sin_addr, addrs[0].sin_port, INPLOOKUP_RLOCKPCB, NULL);
2738 if (inp->inp_socket == NULL)
2741 error = cr_canseeinpcb(req->td->td_ucred, inp);
2743 cru2x(inp->inp_cred, &xuc);
2748 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
2752 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
2753 CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_NEEDGIANT,
2754 0, 0, tcp_getcred, "S,xucred",
2755 "Get the xucred of a TCP connection");
2760 tcp6_getcred(SYSCTL_HANDLER_ARGS)
2762 struct epoch_tracker et;
2764 struct sockaddr_in6 addrs[2];
2771 error = priv_check(req->td, PRIV_NETINET_GETCRED);
2774 error = SYSCTL_IN(req, addrs, sizeof(addrs));
2777 if ((error = sa6_embedscope(&addrs[0], V_ip6_use_defzone)) != 0 ||
2778 (error = sa6_embedscope(&addrs[1], V_ip6_use_defzone)) != 0) {
2781 if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
2783 if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
2790 NET_EPOCH_ENTER(et);
2793 inp = in_pcblookup(&V_tcbinfo,
2794 *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
2796 *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
2797 addrs[0].sin6_port, INPLOOKUP_RLOCKPCB, NULL);
2800 inp = in6_pcblookup(&V_tcbinfo,
2801 &addrs[1].sin6_addr, addrs[1].sin6_port,
2802 &addrs[0].sin6_addr, addrs[0].sin6_port,
2803 INPLOOKUP_RLOCKPCB, NULL);
2806 if (inp->inp_socket == NULL)
2809 error = cr_canseeinpcb(req->td->td_ucred, inp);
2811 cru2x(inp->inp_cred, &xuc);
2816 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
2820 SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
2821 CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_NEEDGIANT,
2822 0, 0, tcp6_getcred, "S,xucred",
2823 "Get the xucred of a TCP6 connection");
2827 /* Path MTU to try next when a fragmentation-needed message is received. */
2829 tcp_next_pmtu(const struct icmp *icp, const struct ip *ip)
2831 int mtu = ntohs(icp->icmp_nextmtu);
2833 /* If no alternative MTU was proposed, try the next smaller one. */
2835 mtu = ip_next_mtu(ntohs(ip->ip_len), 1);
2836 if (mtu < V_tcp_minmss + sizeof(struct tcpiphdr))
2837 mtu = V_tcp_minmss + sizeof(struct tcpiphdr);
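/*
 * For example, if the router did not propose a next-hop MTU
 * (icmp_nextmtu == 0), ip_next_mtu() steps down to the next common
 * MTU plateau below the size of the datagram that was dropped.  The
 * clamp above keeps the result from dropping below
 * V_tcp_minmss + sizeof(struct tcpiphdr); assuming the stock minmss
 * default of 216 bytes, that floor is 256 bytes.
 */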
2843 tcp_ctlinput_with_port(int cmd, struct sockaddr *sa, void *vip, uint16_t port)
2845 struct ip *ip = vip;
2847 struct in_addr faddr;
2850 struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
2852 struct in_conninfo inc;
2853 tcp_seq icmp_tcp_seq;
2856 faddr = ((struct sockaddr_in *)sa)->sin_addr;
2857 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
2860 if (cmd == PRC_MSGSIZE)
2861 notify = tcp_mtudisc_notify;
2862 else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
2863 cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
2864 cmd == PRC_TIMXCEED_INTRANS) && ip)
2865 notify = tcp_drop_syn_sent;
2868 * Hostdead is ugly because it goes linearly through all PCBs.
2869 * XXX: We never get this from ICMP, otherwise it makes an
2870 * excellent DoS attack on machines with many connections.
2872 else if (cmd == PRC_HOSTDEAD)
2874 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
2878 in_pcbnotifyall(&V_tcbinfo, faddr, inetctlerrmap[cmd], notify);
2882 icp = (struct icmp *)((caddr_t)ip - offsetof(struct icmp, icmp_ip));
2883 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
2884 inp = in_pcblookup(&V_tcbinfo, faddr, th->th_dport, ip->ip_src,
2885 th->th_sport, INPLOOKUP_WLOCKPCB, NULL);
2886 if (inp != NULL && PRC_IS_REDIRECT(cmd)) {
2887 /* signal EHOSTDOWN, as it flushes the cached route */
2888 inp = (*notify)(inp, EHOSTDOWN);
2891 icmp_tcp_seq = th->th_seq;
2893 if (!(inp->inp_flags & INP_TIMEWAIT) &&
2894 !(inp->inp_flags & INP_DROPPED) &&
2895 !(inp->inp_socket == NULL)) {
2896 tp = intotcpcb(inp);
2898 if (tp->t_flags & TF_TOE && cmd == PRC_MSGSIZE) {
2900 * MTU discovery for offloaded connections. Let
2901 * the TOE driver verify seq# and process it.
2903 mtu = tcp_next_pmtu(icp, ip);
2904 tcp_offload_pmtu_update(tp, icmp_tcp_seq, mtu);
2908 if (tp->t_port != port) {
2911 if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) &&
2912 SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) {
2913 if (cmd == PRC_MSGSIZE) {
2915 * MTU discovery: we got a needfrag and
2916 * will potentially try a lower MTU.
2918 mtu = tcp_next_pmtu(icp, ip);
2921 * Only process the offered MTU if it
2922 * is smaller than the current one.
2924 if (mtu < tp->t_maxseg +
2925 sizeof(struct tcpiphdr)) {
2926 bzero(&inc, sizeof(inc));
2927 inc.inc_faddr = faddr;
2929 inp->inp_inc.inc_fibnum;
2930 tcp_hc_updatemtu(&inc, mtu);
2931 inp = tcp_mtudisc(inp, mtu);
2934 inp = (*notify)(inp,
2935 inetctlerrmap[cmd]);
2939 bzero(&inc, sizeof(inc));
2940 inc.inc_fport = th->th_dport;
2941 inc.inc_lport = th->th_sport;
2942 inc.inc_faddr = faddr;
2943 inc.inc_laddr = ip->ip_src;
2944 syncache_unreach(&inc, icmp_tcp_seq, port);
2952 tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
2954 tcp_ctlinput_with_port(cmd, sa, vip, htons(0));
2958 tcp_ctlinput_viaudp(int cmd, struct sockaddr *sa, void *vip, void *unused)
2960 /* It's an ICMP error for tunneled TCP over UDP. */
2961 struct ip *outer_ip, *inner_ip;
2964 struct tcphdr *th, ttemp;
2968 inner_ip = (struct ip *)vip;
2969 icmp = (struct icmp *)((caddr_t)inner_ip -
2970 (sizeof(struct icmp) - sizeof(struct ip)));
2971 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
2972 i_hlen = inner_ip->ip_hl << 2;
2973 o_len = ntohs(outer_ip->ip_len);
2975 (sizeof(struct ip) + 8 + i_hlen + sizeof(struct udphdr) + offsetof(struct tcphdr, th_ack))) {
2976 /* Not enough data present */
2979 /* OK, strip out the inner UDP header by copying the TCP header on top of it. */
2980 udp = (struct udphdr *)(((caddr_t)inner_ip) + i_hlen);
2981 if (ntohs(udp->uh_sport) != V_tcp_udp_tunneling_port) {
2984 port = udp->uh_dport;
2985 th = (struct tcphdr *)(udp + 1);
2986 memcpy(&ttemp, th, sizeof(struct tcphdr));
2987 memcpy(udp, &ttemp, sizeof(struct tcphdr));
2988 /* Now adjust the outer IP datagram length down for the removed UDP header. */
2989 o_len -= sizeof(struct udphdr);
2990 outer_ip->ip_len = htons(o_len);
2991 /* Now call in to the normal handling code */
2992 tcp_ctlinput_with_port(cmd, sa, vip, port);
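/*
 * Layout note for the code above: the ICMP payload carries
 * inner IP | UDP | TCP.  Copying the TCP header over the UDP header
 * and shrinking the outer ip_len makes the error look like a plain
 * ICMP-for-TCP message, so the regular tcp_ctlinput_with_port()
 * parser can be reused, with the tunnel port passed separately.
 */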
2998 tcp6_next_pmtu(const struct icmp6_hdr *icmp6)
3000 int mtu = ntohl(icmp6->icmp6_mtu);
3003 * If no alternative MTU was proposed, or the proposed MTU was too
3004 * small, set to the min.
3006 if (mtu < IPV6_MMTU)
3007 mtu = IPV6_MMTU - 8; /* XXXNP: what is the adjustment for? */
3012 tcp6_ctlinput_with_port(int cmd, struct sockaddr *sa, void *d, uint16_t port)
3014 struct in6_addr *dst;
3015 struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
3016 struct ip6_hdr *ip6;
3020 struct icmp6_hdr *icmp6;
3021 struct ip6ctlparam *ip6cp = NULL;
3022 const struct sockaddr_in6 *sa6_src = NULL;
3023 struct in_conninfo inc;
3028 tcp_seq icmp_tcp_seq;
3032 if (sa->sa_family != AF_INET6 ||
3033 sa->sa_len != sizeof(struct sockaddr_in6))
3036 /* if the parameter is from icmp6, decode it. */
3038 ip6cp = (struct ip6ctlparam *)d;
3039 icmp6 = ip6cp->ip6c_icmp6;
3041 ip6 = ip6cp->ip6c_ip6;
3042 off = ip6cp->ip6c_off;
3043 sa6_src = ip6cp->ip6c_src;
3044 dst = ip6cp->ip6c_finaldst;
3048 off = 0; /* fool gcc */
3053 if (cmd == PRC_MSGSIZE)
3054 notify = tcp_mtudisc_notify;
3055 else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
3056 cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
3057 cmd == PRC_TIMXCEED_INTRANS) && ip6 != NULL)
3058 notify = tcp_drop_syn_sent;
3061 * Hostdead is ugly because it goes linearly through all PCBs.
3062 * XXX: We never get this from ICMP, otherwise it makes an
3063 * excellent DoS attack on machines with many connections.
3065 else if (cmd == PRC_HOSTDEAD)
3067 else if ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0)
3071 in6_pcbnotify(&V_tcbinfo, sa, 0,
3072 (const struct sockaddr *)sa6_src,
3073 0, cmd, NULL, notify);
3077 /* Check if we can safely get the ports from the tcp hdr */
3080 (int32_t) (off + sizeof(struct tcp_ports)))) {
3083 bzero(&t_ports, sizeof(struct tcp_ports));
3084 m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports);
3085 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_dst, t_ports.th_dport,
3086 &ip6->ip6_src, t_ports.th_sport, INPLOOKUP_WLOCKPCB, NULL);
3087 if (inp != NULL && PRC_IS_REDIRECT(cmd)) {
3088 /* signal EHOSTDOWN, as it flushes the cached route */
3089 inp = (*notify)(inp, EHOSTDOWN);
3092 off += sizeof(struct tcp_ports);
3093 if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) {
3096 m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq);
3098 if (!(inp->inp_flags & INP_TIMEWAIT) &&
3099 !(inp->inp_flags & INP_DROPPED) &&
3100 !(inp->inp_socket == NULL)) {
3101 tp = intotcpcb(inp);
3103 if (tp->t_flags & TF_TOE && cmd == PRC_MSGSIZE) {
3104 /* MTU discovery for offloaded connections. */
3105 mtu = tcp6_next_pmtu(icmp6);
3106 tcp_offload_pmtu_update(tp, icmp_tcp_seq, mtu);
3110 if (tp->t_port != port) {
3113 if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) &&
3114 SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) {
3115 if (cmd == PRC_MSGSIZE) {
3118 * If we got a needfrag, set the MTU
3119 * in the route to the suggested new
3120 * value (if given) and then notify.
3122 mtu = tcp6_next_pmtu(icmp6);
3124 bzero(&inc, sizeof(inc));
3125 inc.inc_fibnum = M_GETFIB(m);
3126 inc.inc_flags |= INC_ISIPV6;
3127 inc.inc6_faddr = *dst;
3128 if (in6_setscope(&inc.inc6_faddr,
3129 m->m_pkthdr.rcvif, NULL))
3132 * Only process the offered MTU if it
3133 * is smaller than the current one.
3135 if (mtu < tp->t_maxseg +
3136 sizeof (struct tcphdr) +
3137 sizeof (struct ip6_hdr)) {
3138 tcp_hc_updatemtu(&inc, mtu);
3139 tcp_mtudisc(inp, mtu);
3140 ICMP6STAT_INC(icp6s_pmtuchg);
3143 inp = (*notify)(inp,
3144 inet6ctlerrmap[cmd]);
3148 bzero(&inc, sizeof(inc));
3149 inc.inc_fibnum = M_GETFIB(m);
3150 inc.inc_flags |= INC_ISIPV6;
3151 inc.inc_fport = t_ports.th_dport;
3152 inc.inc_lport = t_ports.th_sport;
3153 inc.inc6_faddr = *dst;
3154 inc.inc6_laddr = ip6->ip6_src;
3155 syncache_unreach(&inc, icmp_tcp_seq, port);
3163 tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
3165 tcp6_ctlinput_with_port(cmd, sa, d, htons(0));
3169 tcp6_ctlinput_viaudp(int cmd, struct sockaddr *sa, void *d, void *unused)
3171 struct ip6ctlparam *ip6cp;
3176 ip6cp = (struct ip6ctlparam *)d;
3177 m = m_pulldown(ip6cp->ip6c_m, ip6cp->ip6c_off, sizeof(struct udphdr), NULL);
3181 udp = mtod(m, struct udphdr *);
3182 if (ntohs(udp->uh_sport) != V_tcp_udp_tunneling_port) {
3185 port = udp->uh_dport;
3186 m_adj(m, sizeof(struct udphdr));
3187 if ((m->m_flags & M_PKTHDR) == 0) {
3188 ip6cp->ip6c_m->m_pkthdr.len -= sizeof(struct udphdr);
3190 /* Now call in to the normal handling code */
3191 tcp6_ctlinput_with_port(cmd, sa, d, port);
3197 tcp_keyed_hash(struct in_conninfo *inc, u_char *key, u_int len)
3202 KASSERT(len >= SIPHASH_KEY_LENGTH,
3203 ("%s: keylen %u too short ", __func__, len));
3204 SipHash24_Init(&ctx);
3205 SipHash_SetKey(&ctx, (uint8_t *)key);
3206 SipHash_Update(&ctx, &inc->inc_fport, sizeof(uint16_t));
3207 SipHash_Update(&ctx, &inc->inc_lport, sizeof(uint16_t));
3208 switch (inc->inc_flags & INC_ISIPV6) {
3211 SipHash_Update(&ctx, &inc->inc_faddr, sizeof(struct in_addr));
3212 SipHash_Update(&ctx, &inc->inc_laddr, sizeof(struct in_addr));
3217 SipHash_Update(&ctx, &inc->inc6_faddr, sizeof(struct in6_addr));
3218 SipHash_Update(&ctx, &inc->inc6_laddr, sizeof(struct in6_addr));
3222 SipHash_Final((uint8_t *)hash, &ctx);
3224 return (hash[0] ^ hash[1]);
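/*
 * tcp_new_ts_offset() below derives the TCP timestamp offset from the
 * keyed hash above.  When the ts_offset_per_conn knob is disabled, the
 * ports are zeroed before hashing, so all connections between the same
 * pair of addresses share one offset; otherwise the full four-tuple is
 * hashed and each connection gets its own offset.
 */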
3228 tcp_new_ts_offset(struct in_conninfo *inc)
3230 struct in_conninfo inc_store, *local_inc;
3232 if (!V_tcp_ts_offset_per_conn) {
3233 memcpy(&inc_store, inc, sizeof(struct in_conninfo));
3234 inc_store.inc_lport = 0;
3235 inc_store.inc_fport = 0;
3236 local_inc = &inc_store;
3240 return (tcp_keyed_hash(local_inc, V_ts_offset_secret,
3241 sizeof(V_ts_offset_secret)));
3245 * Following is where TCP initial sequence number generation occurs.
3247 * There are two places where we must use initial sequence numbers:
3248 * 1. In SYN-ACK packets.
3249 * 2. In SYN packets.
3251 * All ISNs for SYN-ACK packets are generated by the syncache. See
3252 * tcp_syncache.c for details.
3254 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
3255 * depends on this property. In addition, these ISNs should be
3256 * unguessable so as to prevent connection hijacking. To satisfy
3257 * the requirements of this situation, the algorithm outlined in
3258 * RFC 1948 is used, with only small modifications.
3260 * Implementation details:
3262 * Time is based off the system timer, and is corrected so that it
3263 * increases by one megabyte per second.  This allows for proper recycling
3264 * on high speed LANs while still leaving over an hour before wraparound.
3267 * As reading the *exact* system time is too expensive to be done
3268 * whenever setting up a TCP connection, we increment the time
3269 * offset in two ways. First, a small random positive increment
3270 * is added to isn_offset for each connection that is set up.
3271 * Second, the function tcp_isn_tick fires once per clock tick
3272 * and increments isn_offset as necessary so that sequence numbers
3273 * are incremented at approximately ISN_BYTES_PER_SECOND. The
3274 * random positive increments serve only to ensure that the same
3275 * exact sequence number is never sent out twice (as could otherwise
3276 * happen when a port is recycled in less than the system tick interval).
3279 * net.inet.tcp.isn_reseed_interval controls the number of seconds
3280 * between seeding of isn_secret. This is normally set to zero,
3281 * as reseeding should not be necessary.
3283 * Locking of the global variables isn_secret, isn_last_reseed, isn_offset,
3284 * isn_offset_old, and isn_ctx is performed using the ISN lock. In
3285 * general, this means holding an exclusive (write) lock.
3288 #define ISN_BYTES_PER_SECOND 1048576
3289 #define ISN_STATIC_INCREMENT 4096
3290 #define ISN_RANDOM_INCREMENT (4096 - 1)
3291 #define ISN_SECRET_LENGTH SIPHASH_KEY_LENGTH
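/*
 * Back-of-the-envelope numbers for the constants above: at
 * ISN_BYTES_PER_SECOND (1 MB/s) the 32-bit sequence space takes
 * 2^32 / 2^20 == 4096 seconds, a little over an hour, to wrap.
 * Assuming a typical hz of 1000, the lazily computed projected offset
 * in tcp_new_isn() grows by roughly ISN_BYTES_PER_SECOND / hz ~= 1048
 * per tick, and every new connection additionally bumps isn_offset by
 * ISN_STATIC_INCREMENT (4096) plus a random value in 0..4095.
 */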
3293 VNET_DEFINE_STATIC(u_char, isn_secret[ISN_SECRET_LENGTH]);
3294 VNET_DEFINE_STATIC(int, isn_last);
3295 VNET_DEFINE_STATIC(int, isn_last_reseed);
3296 VNET_DEFINE_STATIC(u_int32_t, isn_offset);
3297 VNET_DEFINE_STATIC(u_int32_t, isn_offset_old);
3299 #define V_isn_secret VNET(isn_secret)
3300 #define V_isn_last VNET(isn_last)
3301 #define V_isn_last_reseed VNET(isn_last_reseed)
3302 #define V_isn_offset VNET(isn_offset)
3303 #define V_isn_offset_old VNET(isn_offset_old)
3306 tcp_new_isn(struct in_conninfo *inc)
3309 u_int32_t projected_offset;
3312 /* Seed if this is the first use, reseed if requested. */
3313 if ((V_isn_last_reseed == 0) || ((V_tcp_isn_reseed_interval > 0) &&
3314 (((u_int)V_isn_last_reseed + (u_int)V_tcp_isn_reseed_interval*hz)
3316 arc4rand(&V_isn_secret, sizeof(V_isn_secret), 0);
3317 V_isn_last_reseed = ticks;
3320 /* Compute the hash and return the ISN. */
3321 new_isn = (tcp_seq)tcp_keyed_hash(inc, V_isn_secret,
3322 sizeof(V_isn_secret));
3323 V_isn_offset += ISN_STATIC_INCREMENT +
3324 (arc4random() & ISN_RANDOM_INCREMENT);
3325 if (ticks != V_isn_last) {
3326 projected_offset = V_isn_offset_old +
3327 ISN_BYTES_PER_SECOND / hz * (ticks - V_isn_last);
3328 if (SEQ_GT(projected_offset, V_isn_offset))
3329 V_isn_offset = projected_offset;
3330 V_isn_offset_old = V_isn_offset;
3333 new_isn += V_isn_offset;
3339 * When a specific ICMP unreachable message is received and the
3340 * connection state is SYN-SENT, drop the connection. This behavior
3341 * is controlled by the icmp_may_rst sysctl.
3344 tcp_drop_syn_sent(struct inpcb *inp, int errno)
3349 INP_WLOCK_ASSERT(inp);
3351 if ((inp->inp_flags & INP_TIMEWAIT) ||
3352 (inp->inp_flags & INP_DROPPED))
3355 tp = intotcpcb(inp);
3356 if (tp->t_state != TCPS_SYN_SENT)
3359 if (IS_FASTOPEN(tp->t_flags))
3360 tcp_fastopen_disable_path(tp);
3362 tp = tcp_drop(tp, errno);
3370 * When `need fragmentation' ICMP is received, update our idea of the MSS
3371 * based on the new value. Also nudge TCP to send something, since we
3372 * know the packet we just sent was dropped.
3373 * This duplicates some code in the tcp_mss() function in tcp_input.c.
3375 static struct inpcb *
3376 tcp_mtudisc_notify(struct inpcb *inp, int error)
3379 return (tcp_mtudisc(inp, -1));
3382 static struct inpcb *
3383 tcp_mtudisc(struct inpcb *inp, int mtuoffer)
3388 INP_WLOCK_ASSERT(inp);
3389 if ((inp->inp_flags & INP_TIMEWAIT) ||
3390 (inp->inp_flags & INP_DROPPED))
3393 tp = intotcpcb(inp);
3394 KASSERT(tp != NULL, ("tcp_mtudisc: tp == NULL"));
3396 tcp_mss_update(tp, -1, mtuoffer, NULL, NULL);
3398 so = inp->inp_socket;
3399 SOCKBUF_LOCK(&so->so_snd);
3400 /* If the mss is larger than the socket buffer, decrease the mss. */
3401 if (so->so_snd.sb_hiwat < tp->t_maxseg)
3402 tp->t_maxseg = so->so_snd.sb_hiwat;
3403 SOCKBUF_UNLOCK(&so->so_snd);
3405 TCPSTAT_INC(tcps_mturesent);
3407 tp->snd_nxt = tp->snd_una;
3408 tcp_free_sackholes(tp);
3409 tp->snd_recover = tp->snd_max;
3410 if (tp->t_flags & TF_SACK_PERMIT)
3411 EXIT_FASTRECOVERY(tp->t_flags);
3412 if (tp->t_fb->tfb_tcp_mtu_chg != NULL) {
3414 * Conceptually the snd_nxt setting
3415 * and freeing sack holes should
3416 * be done by the default stacks
3417 * own tfb_tcp_mtu_chg().
3419 tp->t_fb->tfb_tcp_mtu_chg(tp);
3421 if (tcp_output(tp) < 0)
3429 * Look up the routing entry to the peer of this inpcb.  If no route
3430 * is found and it cannot be allocated, then return 0.  This routine
3431 * is called by TCP routines that access the rmx structure and by
3432 * tcp_mss_update to get the peer/interface MTU.
3435 tcp_maxmtu(struct in_conninfo *inc, struct tcp_ifcap *cap)
3437 struct nhop_object *nh;
3439 uint32_t maxmtu = 0;
3441 KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));
3443 if (inc->inc_faddr.s_addr != INADDR_ANY) {
3444 nh = fib4_lookup(inc->inc_fibnum, inc->inc_faddr, 0, NHR_NONE, 0);
3449 maxmtu = nh->nh_mtu;
3451 /* Report additional interface capabilities. */
3453 if (ifp->if_capenable & IFCAP_TSO4 &&
3454 ifp->if_hwassist & CSUM_TSO) {
3455 cap->ifcap |= CSUM_TSO;
3456 cap->tsomax = ifp->if_hw_tsomax;
3457 cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount;
3458 cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
3468 tcp_maxmtu6(struct in_conninfo *inc, struct tcp_ifcap *cap)
3470 struct nhop_object *nh;
3471 struct in6_addr dst6;
3474 uint32_t maxmtu = 0;
3476 KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));
3478 if (inc->inc_flags & INC_IPV6MINMTU)
3481 if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
3482 in6_splitscope(&inc->inc6_faddr, &dst6, &scopeid);
3483 nh = fib6_lookup(inc->inc_fibnum, &dst6, scopeid, NHR_NONE, 0);
3488 maxmtu = nh->nh_mtu;
3490 /* Report additional interface capabilities. */
3492 if (ifp->if_capenable & IFCAP_TSO6 &&
3493 ifp->if_hwassist & CSUM_TSO) {
3494 cap->ifcap |= CSUM_TSO;
3495 cap->tsomax = ifp->if_hw_tsomax;
3496 cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount;
3497 cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
3506 * Handle setsockopt(IPV6_USE_MIN_MTU) by a TCP stack.
3508 * XXXGL: we are updating inpcb here with INC_IPV6MINMTU flag.
3509 * The right place to do that is ip6_setpktopt() that has just been
3510 * executed. By the way it just filled ip6po_minmtu for us.
3513 tcp6_use_min_mtu(struct tcpcb *tp)
3515 struct inpcb *inp = tp->t_inpcb;
3517 INP_WLOCK_ASSERT(inp);
3519 * In case of the IPV6_USE_MIN_MTU socket
3520 * option, set the INC_IPV6MINMTU flag to announce
3521 * a corresponding MSS during the initial
3522 * handshake.  If the TCP connection is not in
3523 * the front states, just reduce the MSS being
3524 * used.  This avoids sending TCP segments
3525 * which will be fragmented at the IPv6 layer.
3528 inp->inp_inc.inc_flags |= INC_IPV6MINMTU;
3529 if ((tp->t_state >= TCPS_SYN_SENT) &&
3530 (inp->inp_inc.inc_flags & INC_ISIPV6)) {
3531 struct ip6_pktopts *opt;
3533 opt = inp->in6p_outputopts;
3534 if (opt != NULL && opt->ip6po_minmtu == IP6PO_MINMTU_ALL &&
3535 tp->t_maxseg > TCP6_MSS)
3536 tp->t_maxseg = TCP6_MSS;
3542 * Calculate the effective SMSS per the RFC 5681 definition for a given TCP
3543 * connection in its current state, taking SACK and other options into account.
3546 tcp_maxseg(const struct tcpcb *tp)
3550 if (tp->t_flags & TF_NOOPT)
3551 return (tp->t_maxseg);
3554 * Here we have simplified code from tcp_addoptions(),
3555 * without a proper loop, and with most of the padding hardcoded.
3556 * We might make mistakes with padding here in some edge cases,
3557 * but this is harmless, since the result of tcp_maxseg() is used
3558 * only in cwnd and ssthresh estimations.
3560 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
3561 if (tp->t_flags & TF_RCVD_TSTMP)
3562 optlen = TCPOLEN_TSTAMP_APPA;
3565 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3566 if (tp->t_flags & TF_SIGNATURE)
3567 optlen += PADTCPOLEN(TCPOLEN_SIGNATURE);
3569 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks > 0) {
3570 optlen += TCPOLEN_SACKHDR;
3571 optlen += tp->rcv_numsacks * TCPOLEN_SACK;
3572 optlen = PADTCPOLEN(optlen);
3575 if (tp->t_flags & TF_REQ_TSTMP)
3576 optlen = TCPOLEN_TSTAMP_APPA;
3578 optlen = PADTCPOLEN(TCPOLEN_MAXSEG);
3579 if (tp->t_flags & TF_REQ_SCALE)
3580 optlen += PADTCPOLEN(TCPOLEN_WINDOW);
3581 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3582 if (tp->t_flags & TF_SIGNATURE)
3583 optlen += PADTCPOLEN(TCPOLEN_SIGNATURE);
3585 if (tp->t_flags & TF_SACK_PERMIT)
3586 optlen += PADTCPOLEN(TCPOLEN_SACK_PERMITTED);
3589 optlen = min(optlen, TCP_MAXOLEN);
3590 return (tp->t_maxseg - optlen);
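/*
 * For example (assuming the usual option sizes): on an established
 * connection with timestamps in use, optlen starts at
 * TCPOLEN_TSTAMP_APPA (12), so a 1460-byte t_maxseg yields an
 * effective SMSS of 1448.  If one SACK block is also pending,
 * TCPOLEN_SACKHDR + TCPOLEN_SACK (2 + 8), padded to 12, is added,
 * giving 1436.
 */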
3595 tcp_fixed_maxseg(const struct tcpcb *tp)
3599 if (tp->t_flags & TF_NOOPT)
3600 return (tp->t_maxseg);
3603 * Here we have simplified code from tcp_addoptions(),
3604 * without a proper loop, and with most of the padding hardcoded.
3605 * We only consider fixed options that we would send every time,
3606 * i.e. SACK is not considered.  This is important for cc modules
3607 * to figure out what the modulo of the MSS will be.
3610 #define PAD(len) ((((len) / 4) + !!((len) % 4)) * 4)
3611 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
3612 if (tp->t_flags & TF_RCVD_TSTMP)
3613 optlen = TCPOLEN_TSTAMP_APPA;
3616 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3617 if (tp->t_flags & TF_SIGNATURE)
3618 optlen += PAD(TCPOLEN_SIGNATURE);
3621 if (tp->t_flags & TF_REQ_TSTMP)
3622 optlen = TCPOLEN_TSTAMP_APPA;
3624 optlen = PAD(TCPOLEN_MAXSEG);
3625 if (tp->t_flags & TF_REQ_SCALE)
3626 optlen += PAD(TCPOLEN_WINDOW);
3627 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3628 if (tp->t_flags & TF_SIGNATURE)
3629 optlen += PAD(TCPOLEN_SIGNATURE);
3631 if (tp->t_flags & TF_SACK_PERMIT)
3632 optlen += PAD(TCPOLEN_SACK_PERMITTED);
3635 optlen = min(optlen, TCP_MAXOLEN);
3636 return (tp->t_maxseg - optlen);
3642 sysctl_drop(SYSCTL_HANDLER_ARGS)
3644 /* addrs[0] is a foreign socket, addrs[1] is a local one. */
3645 struct sockaddr_storage addrs[2];
3650 struct sockaddr_in *fin = NULL, *lin = NULL;
3652 struct epoch_tracker et;
3654 struct sockaddr_in6 *fin6, *lin6;
3664 if (req->oldptr != NULL || req->oldlen != 0)
3666 if (req->newptr == NULL)
3668 if (req->newlen < sizeof(addrs))
3670 error = SYSCTL_IN(req, &addrs, sizeof(addrs));
3674 switch (addrs[0].ss_family) {
3677 fin6 = (struct sockaddr_in6 *)&addrs[0];
3678 lin6 = (struct sockaddr_in6 *)&addrs[1];
3679 if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
3680 lin6->sin6_len != sizeof(struct sockaddr_in6))
3682 if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
3683 if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
3685 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
3686 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
3688 fin = (struct sockaddr_in *)&addrs[0];
3689 lin = (struct sockaddr_in *)&addrs[1];
3693 error = sa6_embedscope(fin6, V_ip6_use_defzone);
3696 error = sa6_embedscope(lin6, V_ip6_use_defzone);
3703 fin = (struct sockaddr_in *)&addrs[0];
3704 lin = (struct sockaddr_in *)&addrs[1];
3705 if (fin->sin_len != sizeof(struct sockaddr_in) ||
3706 lin->sin_len != sizeof(struct sockaddr_in))
3713 NET_EPOCH_ENTER(et);
3714 switch (addrs[0].ss_family) {
3717 inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr,
3718 fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port,
3719 INPLOOKUP_WLOCKPCB, NULL);
3724 inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port,
3725 lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL);
3730 if (inp->inp_flags & INP_TIMEWAIT) {
3732 * XXXRW: There currently exists a state where an
3733 * inpcb is present, but its timewait state has been
3734 * discarded.  For now, don't allow dropping of this type of inpcb.
3742 } else if ((inp->inp_flags & INP_DROPPED) == 0 &&
3743 !SOLISTENING(inp->inp_socket)) {
3744 tp = intotcpcb(inp);
3745 tp = tcp_drop(tp, ECONNABORTED);
3756 SYSCTL_PROC(_net_inet_tcp, TCPCTL_DROP, drop,
3757 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
3758 CTLFLAG_NEEDGIANT, NULL, 0, sysctl_drop, "",
3759 "Drop TCP connection");
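/*
 * Minimal userland sketch of driving this handler, much as the
 * tcpdrop(8) utility does; error handling and address setup are left
 * to the caller:
 *
 *	struct sockaddr_storage addrs[2];
 *
 *	memset(addrs, 0, sizeof(addrs));
 *	// addrs[0] = foreign endpoint, addrs[1] = local endpoint,
 *	// each a fully populated sockaddr_in or sockaddr_in6.
 *	if (sysctlbyname("net.inet.tcp.drop", NULL, NULL,
 *	    addrs, sizeof(addrs)) == -1)
 *		warn("net.inet.tcp.drop");
 */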
3762 tcp_sysctl_setsockopt(SYSCTL_HANDLER_ARGS)
3764 return (sysctl_setsockopt(oidp, arg1, arg2, req, &V_tcbinfo,
3765 &tcp_ctloutput_set));
3768 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, setsockopt,
3769 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
3770 CTLFLAG_MPSAFE, NULL, 0, tcp_sysctl_setsockopt, "",
3771 "Set socket option for TCP endpoint");
3775 sysctl_switch_tls(SYSCTL_HANDLER_ARGS)
3777 /* addrs[0] is a foreign socket, addrs[1] is a local one. */
3778 struct sockaddr_storage addrs[2];
3781 struct sockaddr_in *fin = NULL, *lin = NULL;
3783 struct epoch_tracker et;
3785 struct sockaddr_in6 *fin6, *lin6;
3795 if (req->oldptr != NULL || req->oldlen != 0)
3797 if (req->newptr == NULL)
3799 if (req->newlen < sizeof(addrs))
3801 error = SYSCTL_IN(req, &addrs, sizeof(addrs));
3805 switch (addrs[0].ss_family) {
3808 fin6 = (struct sockaddr_in6 *)&addrs[0];
3809 lin6 = (struct sockaddr_in6 *)&addrs[1];
3810 if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
3811 lin6->sin6_len != sizeof(struct sockaddr_in6))
3813 if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
3814 if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
3816 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
3817 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
3819 fin = (struct sockaddr_in *)&addrs[0];
3820 lin = (struct sockaddr_in *)&addrs[1];
3824 error = sa6_embedscope(fin6, V_ip6_use_defzone);
3827 error = sa6_embedscope(lin6, V_ip6_use_defzone);
3834 fin = (struct sockaddr_in *)&addrs[0];
3835 lin = (struct sockaddr_in *)&addrs[1];
3836 if (fin->sin_len != sizeof(struct sockaddr_in) ||
3837 lin->sin_len != sizeof(struct sockaddr_in))
3844 NET_EPOCH_ENTER(et);
3845 switch (addrs[0].ss_family) {
3848 inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr,
3849 fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port,
3850 INPLOOKUP_WLOCKPCB, NULL);
3855 inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port,
3856 lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL);
3862 if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) != 0 ||
3863 inp->inp_socket == NULL) {
3869 so = inp->inp_socket;
3871 error = ktls_set_tx_mode(so,
3872 arg2 == 0 ? TCP_TLS_MODE_SW : TCP_TLS_MODE_IFNET);
3881 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, switch_to_sw_tls,
3882 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
3883 CTLFLAG_NEEDGIANT, NULL, 0, sysctl_switch_tls, "",
3884 "Switch TCP connection to SW TLS");
3885 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, switch_to_ifnet_tls,
3886 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
3887 CTLFLAG_NEEDGIANT, NULL, 1, sysctl_switch_tls, "",
3888 "Switch TCP connection to ifnet TLS");
3892 * Generate a standardized TCP log line for use throughout the
3893 * tcp subsystem. Memory allocation is done with M_NOWAIT to
3894 * allow use in the interrupt context.
3896 * NB: The caller MUST free(s, M_TCPLOG) the returned string.
3897 * NB: The function may return NULL if memory allocation failed.
3899 * Due to header inclusion and ordering limitations the struct ip
3900 * and ip6_hdr pointers have to be passed as void pointers.
3903 tcp_log_vain(struct in_conninfo *inc, struct tcphdr *th, const void *ip4hdr,
3907 /* Is logging enabled? */
3908 if (V_tcp_log_in_vain == 0)
3911 return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
3915 tcp_log_addrs(struct in_conninfo *inc, struct tcphdr *th, const void *ip4hdr,
3919 /* Is logging enabled? */
3920 if (tcp_log_debug == 0)
3923 return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
3927 tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th, const void *ip4hdr,
3933 const struct ip *ip = (const struct ip *)ip4hdr;
3936 const struct ip6_hdr *ip6 = (const struct ip6_hdr *)ip6hdr;
3940 * The log line looks like this:
3941 * "TCP: [1.2.3.4]:50332 to [1.2.3.4]:80 tcpflags 0x2<SYN>"
3943 size = sizeof("TCP: []:12345 to []:12345 tcpflags 0x2<>") +
3944 sizeof(PRINT_TH_FLAGS) + 1 +
3946 2 * INET6_ADDRSTRLEN;
3948 2 * INET_ADDRSTRLEN;
3951 s = malloc(size, M_TCPLOG, M_ZERO|M_NOWAIT);
3955 strcat(s, "TCP: [");
3958 if (inc && ((inc->inc_flags & INC_ISIPV6) == 0)) {
3959 inet_ntoa_r(inc->inc_faddr, sp);
3961 sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
3963 inet_ntoa_r(inc->inc_laddr, sp);
3965 sprintf(sp, "]:%i", ntohs(inc->inc_lport));
3968 ip6_sprintf(sp, &inc->inc6_faddr);
3970 sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
3972 ip6_sprintf(sp, &inc->inc6_laddr);
3974 sprintf(sp, "]:%i", ntohs(inc->inc_lport));
3975 } else if (ip6 && th) {
3976 ip6_sprintf(sp, &ip6->ip6_src);
3978 sprintf(sp, "]:%i to [", ntohs(th->th_sport));
3980 ip6_sprintf(sp, &ip6->ip6_dst);
3982 sprintf(sp, "]:%i", ntohs(th->th_dport));
3985 } else if (ip && th) {
3986 inet_ntoa_r(ip->ip_src, sp);
3988 sprintf(sp, "]:%i to [", ntohs(th->th_sport));
3990 inet_ntoa_r(ip->ip_dst, sp);
3992 sprintf(sp, "]:%i", ntohs(th->th_dport));
4000 sprintf(sp, " tcpflags 0x%b", tcp_get_flags(th), PRINT_TH_FLAGS);
4001 if (*(s + size - 1) != '\0')
4002 panic("%s: string too long", __func__);
4007 * A subroutine which makes it easy to track TCP state changes with DTrace.
4008 * This function shouldn't be called for t_state initializations that don't
4009 * correspond to actual TCP state transitions.
4012 tcp_state_change(struct tcpcb *tp, int newstate)
4014 #if defined(KDTRACE_HOOKS)
4015 int pstate = tp->t_state;
4018 TCPSTATES_DEC(tp->t_state);
4019 TCPSTATES_INC(newstate);
4020 tp->t_state = newstate;
4021 TCP_PROBE6(state__change, NULL, tp, NULL, tp, NULL, pstate);
4025 * Create an external-format (``xtcpcb'') structure using the information in
4026 * the kernel-format tcpcb structure pointed to by tp. This is done to
4027 * reduce the spew of irrelevant information over this interface, to isolate
4028 * user code from changes in the kernel structure, and potentially to provide
4029 * information-hiding if we decide that some of this information should be
4030 * hidden from users.
4033 tcp_inptoxtp(const struct inpcb *inp, struct xtcpcb *xt)
4035 struct tcpcb *tp = intotcpcb(inp);
4036 struct tcptw *tw = intotw(inp);
4039 bzero(xt, sizeof(*xt));
4040 if (inp->inp_flags & INP_TIMEWAIT) {
4041 xt->t_state = TCPS_TIME_WAIT;
4042 xt->xt_encaps_port = tw->t_port;
4044 xt->t_state = tp->t_state;
4045 xt->t_logstate = tp->t_logstate;
4046 xt->t_flags = tp->t_flags;
4047 xt->t_sndzerowin = tp->t_sndzerowin;
4048 xt->t_sndrexmitpack = tp->t_sndrexmitpack;
4049 xt->t_rcvoopack = tp->t_rcvoopack;
4050 xt->t_rcv_wnd = tp->rcv_wnd;
4051 xt->t_snd_wnd = tp->snd_wnd;
4052 xt->t_snd_cwnd = tp->snd_cwnd;
4053 xt->t_snd_ssthresh = tp->snd_ssthresh;
4054 xt->t_dsack_bytes = tp->t_dsack_bytes;
4055 xt->t_dsack_tlp_bytes = tp->t_dsack_tlp_bytes;
4056 xt->t_dsack_pack = tp->t_dsack_pack;
4057 xt->t_maxseg = tp->t_maxseg;
4058 xt->xt_ecn = ((tp->t_flags2 & TF2_ECN_PERMIT) ? 1 : 0) +
4059     ((tp->t_flags2 & TF2_ACE_PERMIT) ? 2 : 0);
4061 now = getsbinuptime();
4062 #define COPYTIMER(ttt) do { \
4063 if (callout_active(&tp->t_timers->ttt)) \
4064 xt->ttt = (tp->t_timers->ttt.c_time - now) / \
4069 COPYTIMER(tt_delack);
4070 COPYTIMER(tt_rexmt);
4071 COPYTIMER(tt_persist);
4075 xt->t_rcvtime = 1000 * (ticks - tp->t_rcvtime) / hz;
4077 xt->xt_encaps_port = tp->t_port;
4078 bcopy(tp->t_fb->tfb_tcp_block_name, xt->xt_stack,
4079 TCP_FUNCTION_NAME_LEN_MAX);
4080 bcopy(CC_ALGO(tp)->name, xt->xt_cc,
4083 (void)tcp_log_get_id(tp, xt->xt_logid);
4087 xt->xt_len = sizeof(struct xtcpcb);
4088 in_pcbtoxinpcb(inp, &xt->xt_inp);
4089 if (inp->inp_socket == NULL)
4090 xt->xt_inp.xi_socket.xso_protocol = IPPROTO_TCP;
4094 tcp_log_end_status(struct tcpcb *tp, uint8_t status)
4099 (status > TCP_EI_STATUS_MAX_VALUE) ||
4104 if (status > (sizeof(uint32_t) * 8)) {
4105 /* Should this be a KASSERT? */
4108 bit = 1U << (status - 1);
4109 if (bit & tp->t_end_info_status) {
4110 /* already logged */
4113 for (i = 0; i < TCP_END_BYTE_INFO; i++) {
4114 if (tp->t_end_info_bytes[i] == TCP_EI_EMPTY_SLOT) {
4115 tp->t_end_info_bytes[i] = status;
4116 tp->t_end_info_status |= bit;
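/*
 * Each status value maps to a single bit: status N sets 1U << (N - 1)
 * in t_end_info_status (e.g. status 3 maps to 0x4).  That mask is what
 * keeps a given status from being appended to t_end_info_bytes more
 * than once.
 */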
4123 tcp_can_enable_pacing(void)
4126 if ((tcp_pacing_limit == -1) ||
4127 (tcp_pacing_limit > number_of_tcp_connections_pacing)) {
4128 atomic_fetchadd_int(&number_of_tcp_connections_pacing, 1);
4129 shadow_num_connections = number_of_tcp_connections_pacing;
4136 static uint8_t tcp_pacing_warning = 0;
4139 tcp_decrement_paced_conn(void)
4143 ret = atomic_fetchadd_int(&number_of_tcp_connections_pacing, -1);
4144 shadow_num_connections = number_of_tcp_connections_pacing;
4145 KASSERT(ret != 0, ("tcp_paced_connection_exits -1 would cause wrap?"));
4147 if (tcp_pacing_limit != -1) {
4148 printf("Warning: all pacing is now disabled; the pacing count was decremented below zero!\n");
4149 tcp_pacing_limit = 0;
4150 } else if (tcp_pacing_warning == 0) {
4151 printf("Warning: pacing count is invalid; an invalid decrement occurred\n");
4152 tcp_pacing_warning = 1;