1 /*
2 * Copyright (c) 2016-2018 Netflix, Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
31 #include "opt_inet6.h"
32 #include "opt_ipsec.h"
33 #include "opt_tcpdebug.h"
35 #include <sys/param.h>
36 #include <sys/module.h>
37 #include <sys/kernel.h>
39 #include <sys/hhook.h>
42 #include <sys/malloc.h>
44 #include <sys/mutex.h>
46 #include <sys/proc.h> /* for proc0 declaration */
48 #include <sys/qmath.h>
50 #include <sys/socket.h>
51 #include <sys/socketvar.h>
52 #include <sys/sysctl.h>
53 #include <sys/systm.h>
56 #include <sys/stats.h> /* Must come after qmath.h and tree.h */
58 #include <sys/refcount.h>
59 #include <sys/queue.h>
61 #include <sys/kthread.h>
62 #include <sys/kern_prefetch.h>
66 #include <net/route.h>
69 #define TCPSTATES /* for logging */
71 #include <netinet/in.h>
72 #include <netinet/in_kdtrace.h>
73 #include <netinet/in_pcb.h>
74 #include <netinet/ip.h>
75 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
76 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
77 #include <netinet/ip_var.h>
78 #include <netinet/ip6.h>
79 #include <netinet6/in6_pcb.h>
80 #include <netinet6/ip6_var.h>
82 #include <netinet/tcp.h>
83 #include <netinet/tcp_fsm.h>
84 #include <netinet/tcp_log_buf.h>
85 #include <netinet/tcp_seq.h>
86 #include <netinet/tcp_timer.h>
87 #include <netinet/tcp_var.h>
88 #include <netinet/tcp_hpts.h>
89 #include <netinet/tcpip.h>
90 #include <netinet/cc/cc.h>
91 #include <netinet/tcp_fastopen.h>
93 #include <netinet/tcp_debug.h>
96 #include <netinet/tcp_offload.h>
99 #include <netinet6/tcp6_var.h>
102 #include <netipsec/ipsec_support.h>
104 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
105 #include <netipsec/ipsec.h>
106 #include <netipsec/ipsec6.h>
107 #endif /* IPSEC */
109 #include <netinet/udp.h>
110 #include <netinet/udp_var.h>
111 #include <machine/in_cksum.h>
114 #include <security/mac/mac_framework.h>
116 #include "sack_filter.h"
117 #include "tcp_rack.h"
118 #include "rack_bbr_common.h"
120 uma_zone_t rack_zone;
121 uma_zone_t rack_pcb_zone;
124 #define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t)))
127 struct sysctl_ctx_list rack_sysctl_ctx;
128 struct sysctl_oid *rack_sysctl_root;
133 /*
134 * The RACK module incorporates a number of
135 * TCP ideas that have been put out into the IETF
136 * over the last few years:
137 * - Matt Mathis's Rate Halving which slowly drops
138 * the congestion window so that the ack clock can
139 * be maintained during a recovery.
140 * - Yuchung Cheng's RACK TCP (for which it's named) that
141 * will stop us using the number of dup acks and instead
142 * use time as the gauge of when we retransmit.
143 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
144 * of Dukkipati et al.
145 * RACK depends on SACK, so if an endpoint arrives that
146 * cannot do SACK the state machine below will shuttle the
147 * connection back to using the "default" TCP stack that is
148 * in FreeBSD.
150 * To implement RACK the original TCP stack was first decomposed
151 * into a functional state machine with individual states
152 * for each of the possible TCP connection states. The do_segment
153 * function's role in life is to mandate that the connection supports SACK
154 * initially and then assure that the RACK state matches the connection
155 * state before calling the states do_segment function. Each
156 * state is simplified due to the fact that the original do_segment
157 * has been decomposed and we *know* what state we are in (no
158 * switches on the state) and all tests for SACK are gone. This
159 * greatly simplifies what each state does.
161 * TCP output is also overwritten with a new version since it
162 * must maintain the new rack scoreboard.
163 */
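/*
 * Editor's sketch (not from this file): the decomposition described
 * above amounts to one do_segment handler per TCP state, dispatched
 * directly rather than via a switch on tp->t_state. Illustrative
 * only; the stack itself tracks the current handler and updates it
 * on state changes (see rack_set_state()).
 */
#if 0
typedef int32_t (*rack_do_seg_fn)(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to,
    int32_t drop_hdrlen, int32_t tlen, uint32_t tiwin,
    int32_t thflags, int32_t nxt_pkt);

static const rack_do_seg_fn rack_do_seg[TCP_NSTATES] = {
	[TCPS_SYN_SENT]		= rack_do_syn_sent,
	[TCPS_SYN_RECEIVED]	= rack_do_syn_recv,
	[TCPS_ESTABLISHED]	= rack_do_established,
	[TCPS_CLOSE_WAIT]	= rack_do_close_wait,
	[TCPS_FIN_WAIT_1]	= rack_do_fin_wait_1,
	[TCPS_CLOSING]		= rack_do_closing,
	[TCPS_LAST_ACK]		= rack_do_lastack,
	[TCPS_FIN_WAIT_2]	= rack_do_fin_wait_2,
};
#endif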
165 static int32_t rack_precache = 1;
166 static int32_t rack_tlp_thresh = 1;
167 static int32_t rack_reorder_thresh = 2;
168 static int32_t rack_reorder_fade = 60000; /* 0 - never fade, def 60,000 ms (60 seconds) */
170 static int32_t rack_pkt_delay = 1;
171 static int32_t rack_inc_var = 0; /* For TLP */
172 static int32_t rack_reduce_largest_on_idle = 0;
173 static int32_t rack_min_pace_time = 0;
174 static int32_t rack_min_pace_time_seg_req = 6;
175 static int32_t rack_early_recovery = 1;
176 static int32_t rack_early_recovery_max_seg = 6;
177 static int32_t rack_send_a_lot_in_prr = 1;
178 static int32_t rack_min_to = 1; /* Number of ms minimum timeout */
179 static int32_t rack_tlp_in_recovery = 1; /* Can we do TLP in recovery? */
180 static int32_t rack_verbose_logging = 0;
181 static int32_t rack_ignore_data_after_close = 1;
182 static int32_t rack_map_entries_limit = 1024;
183 static int32_t rack_map_split_limit = 256;
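/*
 * Editor's note on the two limits above: the sendmap keeps one entry
 * per contiguous run of transmitted data, and a SACK that covers only
 * part of an entry forces that entry to be split in two. A peer
 * acknowledging many small, discontiguous ranges could otherwise grow
 * the map without bound, so total entries and peer-driven splits are
 * capped separately (see rack_alloc_full_limit() and
 * rack_alloc_limit() below).
 */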
185 /*
186 * Currently regular TCP has an rto_min of 30ms;
187 * the backoff goes 12 times, so that ends up
188 * being a total of 122.850 seconds before a
189 * connection is killed.
190 */
191 static int32_t rack_tlp_min = 10;
192 static int32_t rack_rto_min = 30; /* 30ms same as main freebsd */
193 static int32_t rack_rto_max = 30000; /* 30 seconds */
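/*
 * Editor's illustration of the 122.850s figure in the comment above,
 * assuming a doubling backoff capped at 12 shifts:
 * 30ms * (2^0 + 2^1 + ... + 2^11) = 30ms * 4095 = 122,850ms.
 */
#if 0	/* illustrative only, not part of the stack */
static uint32_t
rto_total_ms(uint32_t rto_min_ms, int shifts)
{
	uint32_t total = 0;
	int i;

	for (i = 0; i < shifts; i++)
		total += rto_min_ms << i;	/* 30, 60, 120, ... ms */
	return (total);		/* rto_total_ms(30, 12) == 122850 */
}
#endif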
194 static const int32_t rack_free_cache = 2;
195 static int32_t rack_hptsi_segments = 40;
196 static int32_t rack_rate_sample_method = USE_RTT_LOW;
197 static int32_t rack_pace_every_seg = 1;
198 static int32_t rack_delayed_ack_time = 200; /* 200ms */
199 static int32_t rack_slot_reduction = 4;
200 static int32_t rack_lower_cwnd_at_tlp = 0;
201 static int32_t rack_use_proportional_reduce = 0;
202 static int32_t rack_proportional_rate = 10;
203 static int32_t rack_tlp_max_resend = 2;
204 static int32_t rack_limited_retran = 0;
205 static int32_t rack_always_send_oldest = 0;
206 static int32_t rack_sack_block_limit = 128;
207 static int32_t rack_use_sack_filter = 1;
208 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;
210 /* Rack specific counters */
211 counter_u64_t rack_badfr;
212 counter_u64_t rack_badfr_bytes;
213 counter_u64_t rack_rtm_prr_retran;
214 counter_u64_t rack_rtm_prr_newdata;
215 counter_u64_t rack_timestamp_mismatch;
216 counter_u64_t rack_reorder_seen;
217 counter_u64_t rack_paced_segments;
218 counter_u64_t rack_unpaced_segments;
219 counter_u64_t rack_saw_enobuf;
220 counter_u64_t rack_saw_enetunreach;
222 /* Tail loss probe counters */
223 counter_u64_t rack_tlp_tot;
224 counter_u64_t rack_tlp_newdata;
225 counter_u64_t rack_tlp_retran;
226 counter_u64_t rack_tlp_retran_bytes;
227 counter_u64_t rack_tlp_retran_fail;
228 counter_u64_t rack_to_tot;
229 counter_u64_t rack_to_arm_rack;
230 counter_u64_t rack_to_arm_tlp;
231 counter_u64_t rack_to_alloc;
232 counter_u64_t rack_to_alloc_hard;
233 counter_u64_t rack_to_alloc_emerg;
234 counter_u64_t rack_to_alloc_limited;
235 counter_u64_t rack_alloc_limited_conns;
236 counter_u64_t rack_split_limited;
238 counter_u64_t rack_sack_proc_all;
239 counter_u64_t rack_sack_proc_short;
240 counter_u64_t rack_sack_proc_restart;
241 counter_u64_t rack_runt_sacks;
242 counter_u64_t rack_used_tlpmethod;
243 counter_u64_t rack_used_tlpmethod2;
244 counter_u64_t rack_enter_tlp_calc;
245 counter_u64_t rack_input_idle_reduces;
246 counter_u64_t rack_tlp_does_nada;
248 /* Temp CPU counters */
249 counter_u64_t rack_find_high;
251 counter_u64_t rack_progress_drops;
252 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
253 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];
255 /*
256 * This was originally defined in tcp_timer.c, but is now reproduced here given
257 * the unification of the SYN and non-SYN retransmit timer exponents combined
258 * with wanting to retain previous behaviour for previously deployed stack
259 * instances.
260 */
261 int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
262 { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };
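/*
 * Editor's illustration of how a backoff table like this is applied
 * (hypothetical fragment; the actual timer arming lives in the
 * stack's timer code): the current RTO is the base retransmit value
 * scaled by the entry selected by the retransmit shift.
 */
#if 0
	rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
#endif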
265 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);
268 rack_process_ack(struct mbuf *m, struct tcphdr *th,
269 struct socket *so, struct tcpcb *tp, struct tcpopt *to,
270 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val);
272 rack_process_data(struct mbuf *m, struct tcphdr *th,
273 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
274 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
276 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
277 struct tcphdr *th, uint16_t nsegs, uint16_t type, int32_t recovery);
278 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
279 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
281 static struct rack_sendmap *
282 rack_check_recovery_mode(struct tcpcb *tp,
285 rack_cong_signal(struct tcpcb *tp, struct tcphdr *th,
287 static void rack_counter_destroy(void);
289 rack_ctloutput(struct socket *so, struct sockopt *sopt,
290 struct inpcb *inp, struct tcpcb *tp);
291 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
293 rack_do_segment(struct mbuf *m, struct tcphdr *th,
294 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
296 static void rack_dtor(void *mem, int32_t size, void *arg);
298 rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm,
299 uint32_t t, uint32_t cts);
300 static struct rack_sendmap *
301 rack_find_high_nonack(struct tcp_rack *rack,
302 struct rack_sendmap *rsm);
303 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
304 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
305 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
307 rack_get_sockopt(struct socket *so, struct sockopt *sopt,
308 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
309 static int32_t rack_handoff_ok(struct tcpcb *tp);
310 static int32_t rack_init(struct tcpcb *tp);
311 static void rack_init_sysctls(void);
313 rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
316 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
317 uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts,
318 uint8_t pass, struct rack_sendmap *hintrsm);
320 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
321 struct rack_sendmap *rsm);
322 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num);
323 static int32_t rack_output(struct tcpcb *tp);
325 rack_hpts_do_segment(struct mbuf *m, struct tcphdr *th,
326 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
327 uint8_t iptos, int32_t nxt_pkt, struct timeval *tv);
330 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
331 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
333 static void rack_post_recovery(struct tcpcb *tp, struct tcphdr *th);
334 static void rack_remxt_tmr(struct tcpcb *tp);
336 rack_set_sockopt(struct socket *so, struct sockopt *sopt,
337 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
338 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
339 static int32_t rack_stopall(struct tcpcb *tp);
341 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type,
343 static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type);
344 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
345 static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type);
347 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
348 struct rack_sendmap *rsm, uint32_t ts, int32_t * lenp);
350 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
351 struct rack_sendmap *rsm, uint32_t ts);
353 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
354 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type);
355 static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
357 rack_challenge_ack(struct mbuf *m, struct tcphdr *th,
358 struct tcpcb *tp, int32_t * ret_val);
360 rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
361 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
362 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
364 rack_do_closing(struct mbuf *m, struct tcphdr *th,
365 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
366 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
367 static void rack_do_drop(struct mbuf *m, struct tcpcb *tp);
369 rack_do_dropafterack(struct mbuf *m, struct tcpcb *tp,
370 struct tcphdr *th, int32_t thflags, int32_t tlen, int32_t * ret_val);
372 rack_do_dropwithreset(struct mbuf *m, struct tcpcb *tp,
373 struct tcphdr *th, int32_t rstreason, int32_t tlen);
375 rack_do_established(struct mbuf *m, struct tcphdr *th,
376 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
377 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
379 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
380 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
381 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt);
383 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
384 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
385 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
387 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
388 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
389 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
391 rack_do_lastack(struct mbuf *m, struct tcphdr *th,
392 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
393 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
395 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
396 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
397 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
399 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
400 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
401 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
403 rack_drop_checks(struct tcpopt *to, struct mbuf *m,
404 struct tcphdr *th, struct tcpcb *tp, int32_t * tlenp, int32_t * thf,
405 int32_t * drop_hdrlen, int32_t * ret_val);
407 rack_process_rst(struct mbuf *m, struct tcphdr *th,
408 struct socket *so, struct tcpcb *tp);
409 struct rack_sendmap *
410 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
412 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt);
414 tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th);
417 rack_ts_check(struct mbuf *m, struct tcphdr *th,
418 struct tcpcb *tp, int32_t tlen, int32_t thflags, int32_t * ret_val);
420 int32_t rack_clear_counter = 0;
423 static int
424 sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
425 {
426 uint32_t stat;
427 int32_t error;
429 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
430 if (error || req->newptr == NULL)
431 return (error);
433 error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
434 if (error)
435 return (error);
436 if (stat == 1) {
437 #ifdef INVARIANTS
438 printf("Clearing RACK counters\n");
439 #endif
440 counter_u64_zero(rack_badfr);
441 counter_u64_zero(rack_badfr_bytes);
442 counter_u64_zero(rack_rtm_prr_retran);
443 counter_u64_zero(rack_rtm_prr_newdata);
444 counter_u64_zero(rack_timestamp_mismatch);
445 counter_u64_zero(rack_reorder_seen);
446 counter_u64_zero(rack_tlp_tot);
447 counter_u64_zero(rack_tlp_newdata);
448 counter_u64_zero(rack_tlp_retran);
449 counter_u64_zero(rack_tlp_retran_bytes);
450 counter_u64_zero(rack_tlp_retran_fail);
451 counter_u64_zero(rack_to_tot);
452 counter_u64_zero(rack_to_arm_rack);
453 counter_u64_zero(rack_to_arm_tlp);
454 counter_u64_zero(rack_paced_segments);
455 counter_u64_zero(rack_unpaced_segments);
456 counter_u64_zero(rack_saw_enobuf);
457 counter_u64_zero(rack_saw_enetunreach);
458 counter_u64_zero(rack_to_alloc_hard);
459 counter_u64_zero(rack_to_alloc_emerg);
460 counter_u64_zero(rack_sack_proc_all);
461 counter_u64_zero(rack_sack_proc_short);
462 counter_u64_zero(rack_sack_proc_restart);
463 counter_u64_zero(rack_to_alloc);
464 counter_u64_zero(rack_to_alloc_limited);
465 counter_u64_zero(rack_alloc_limited_conns);
466 counter_u64_zero(rack_split_limited);
467 counter_u64_zero(rack_find_high);
468 counter_u64_zero(rack_runt_sacks);
469 counter_u64_zero(rack_used_tlpmethod);
470 counter_u64_zero(rack_used_tlpmethod2);
471 counter_u64_zero(rack_enter_tlp_calc);
472 counter_u64_zero(rack_progress_drops);
473 counter_u64_zero(rack_tlp_does_nada);
474 }
475 rack_clear_counter = 0;
476 return (0);
477 }
481 static void
482 rack_init_sysctls(void)
483 {
484 SYSCTL_ADD_S32(&rack_sysctl_ctx,
485 SYSCTL_CHILDREN(rack_sysctl_root),
486 OID_AUTO, "map_limit", CTLFLAG_RW,
487 &rack_map_entries_limit, 1024,
488 "Is there a limit on how big the sendmap can grow?");
490 SYSCTL_ADD_S32(&rack_sysctl_ctx,
491 SYSCTL_CHILDREN(rack_sysctl_root),
492 OID_AUTO, "map_splitlimit", CTLFLAG_RW,
493 &rack_map_split_limit, 256,
494 "Is there a limit on how much splitting a peer can do?");
496 SYSCTL_ADD_S32(&rack_sysctl_ctx,
497 SYSCTL_CHILDREN(rack_sysctl_root),
498 OID_AUTO, "rate_sample_method", CTLFLAG_RW,
499 &rack_rate_sample_method, USE_RTT_LOW,
500 "What method should we use for rate sampling 0=high, 1=low");
501 SYSCTL_ADD_S32(&rack_sysctl_ctx,
502 SYSCTL_CHILDREN(rack_sysctl_root),
503 OID_AUTO, "data_after_close", CTLFLAG_RW,
504 &rack_ignore_data_after_close, 0,
505 "Do we hold off sending a RST until all pending data is ack'd");
506 SYSCTL_ADD_S32(&rack_sysctl_ctx,
507 SYSCTL_CHILDREN(rack_sysctl_root),
508 OID_AUTO, "tlpmethod", CTLFLAG_RW,
509 &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
510 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
511 SYSCTL_ADD_S32(&rack_sysctl_ctx,
512 SYSCTL_CHILDREN(rack_sysctl_root),
513 OID_AUTO, "min_pace_time", CTLFLAG_RW,
514 &rack_min_pace_time, 0,
515 "Should we enforce a minimum pace time of 1ms");
516 SYSCTL_ADD_S32(&rack_sysctl_ctx,
517 SYSCTL_CHILDREN(rack_sysctl_root),
518 OID_AUTO, "min_pace_segs", CTLFLAG_RW,
519 &rack_min_pace_time_seg_req, 6,
520 "How many segments have to be in the len to enforce min-pace-time");
521 SYSCTL_ADD_S32(&rack_sysctl_ctx,
522 SYSCTL_CHILDREN(rack_sysctl_root),
523 OID_AUTO, "idle_reduce_high", CTLFLAG_RW,
524 &rack_reduce_largest_on_idle, 0,
525 "Should we reduce the largest cwnd seen to IW on idle reduction");
526 SYSCTL_ADD_S32(&rack_sysctl_ctx,
527 SYSCTL_CHILDREN(rack_sysctl_root),
528 OID_AUTO, "bb_verbose", CTLFLAG_RW,
529 &rack_verbose_logging, 0,
530 "Should RACK black box logging be verbose");
531 SYSCTL_ADD_S32(&rack_sysctl_ctx,
532 SYSCTL_CHILDREN(rack_sysctl_root),
533 OID_AUTO, "sackfiltering", CTLFLAG_RW,
534 &rack_use_sack_filter, 1,
535 "Do we use sack filtering?");
536 SYSCTL_ADD_S32(&rack_sysctl_ctx,
537 SYSCTL_CHILDREN(rack_sysctl_root),
538 OID_AUTO, "delayed_ack", CTLFLAG_RW,
539 &rack_delayed_ack_time, 200,
540 "Delayed ack time (200ms)");
541 SYSCTL_ADD_S32(&rack_sysctl_ctx,
542 SYSCTL_CHILDREN(rack_sysctl_root),
543 OID_AUTO, "tlpminto", CTLFLAG_RW,
545 "TLP minimum timeout per the specification (10ms)");
546 SYSCTL_ADD_S32(&rack_sysctl_ctx,
547 SYSCTL_CHILDREN(rack_sysctl_root),
548 OID_AUTO, "precache", CTLFLAG_RW,
550 "Where should we precache the mcopy (0 is not at all)");
551 SYSCTL_ADD_S32(&rack_sysctl_ctx,
552 SYSCTL_CHILDREN(rack_sysctl_root),
553 OID_AUTO, "sblklimit", CTLFLAG_RW,
554 &rack_sack_block_limit, 128,
555 "When do we start paying attention to small sack blocks");
556 SYSCTL_ADD_S32(&rack_sysctl_ctx,
557 SYSCTL_CHILDREN(rack_sysctl_root),
558 OID_AUTO, "send_oldest", CTLFLAG_RW,
559 &rack_always_send_oldest, 1,
560 "Should we always send the oldest TLP and RACK-TLP");
561 SYSCTL_ADD_S32(&rack_sysctl_ctx,
562 SYSCTL_CHILDREN(rack_sysctl_root),
563 OID_AUTO, "rack_tlp_in_recovery", CTLFLAG_RW,
564 &rack_tlp_in_recovery, 1,
565 "Can we do a TLP during recovery?");
566 SYSCTL_ADD_S32(&rack_sysctl_ctx,
567 SYSCTL_CHILDREN(rack_sysctl_root),
568 OID_AUTO, "rack_tlimit", CTLFLAG_RW,
569 &rack_limited_retran, 0,
570 "How many times can a rack timeout drive out sends");
571 SYSCTL_ADD_S32(&rack_sysctl_ctx,
572 SYSCTL_CHILDREN(rack_sysctl_root),
573 OID_AUTO, "minrto", CTLFLAG_RW,
575 "Minimum RTO in ms -- set with caution below 1000 due to TLP");
576 SYSCTL_ADD_S32(&rack_sysctl_ctx,
577 SYSCTL_CHILDREN(rack_sysctl_root),
578 OID_AUTO, "maxrto", CTLFLAG_RW,
580 "Maxiumum RTO in ms -- should be at least as large as min_rto");
581 SYSCTL_ADD_S32(&rack_sysctl_ctx,
582 SYSCTL_CHILDREN(rack_sysctl_root),
583 OID_AUTO, "tlp_retry", CTLFLAG_RW,
584 &rack_tlp_max_resend, 2,
585 "How many times does TLP retry a single segment or multiple with no ACK");
586 SYSCTL_ADD_S32(&rack_sysctl_ctx,
587 SYSCTL_CHILDREN(rack_sysctl_root),
588 OID_AUTO, "recovery_loss_prop", CTLFLAG_RW,
589 &rack_use_proportional_reduce, 0,
590 "Should we proportionaly reduce cwnd based on the number of losses ");
591 SYSCTL_ADD_S32(&rack_sysctl_ctx,
592 SYSCTL_CHILDREN(rack_sysctl_root),
593 OID_AUTO, "recovery_prop", CTLFLAG_RW,
594 &rack_proportional_rate, 10,
595 "What percent reduction per loss");
596 SYSCTL_ADD_S32(&rack_sysctl_ctx,
597 SYSCTL_CHILDREN(rack_sysctl_root),
598 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
599 &rack_lower_cwnd_at_tlp, 0,
600 "When a TLP completes a retran should we enter recovery?");
601 SYSCTL_ADD_S32(&rack_sysctl_ctx,
602 SYSCTL_CHILDREN(rack_sysctl_root),
603 OID_AUTO, "hptsi_reduces", CTLFLAG_RW,
604 &rack_slot_reduction, 4,
605 "When setting a slot should we reduce by divisor");
606 SYSCTL_ADD_S32(&rack_sysctl_ctx,
607 SYSCTL_CHILDREN(rack_sysctl_root),
608 OID_AUTO, "hptsi_every_seg", CTLFLAG_RW,
609 &rack_pace_every_seg, 1,
610 "Should we pace out every segment hptsi");
611 SYSCTL_ADD_S32(&rack_sysctl_ctx,
612 SYSCTL_CHILDREN(rack_sysctl_root),
613 OID_AUTO, "hptsi_seg_max", CTLFLAG_RW,
614 &rack_hptsi_segments, 6,
615 "Should we pace out only a limited size of segments");
616 SYSCTL_ADD_S32(&rack_sysctl_ctx,
617 SYSCTL_CHILDREN(rack_sysctl_root),
618 OID_AUTO, "prr_sendalot", CTLFLAG_RW,
619 &rack_send_a_lot_in_prr, 1,
620 "Send a lot in prr");
621 SYSCTL_ADD_S32(&rack_sysctl_ctx,
622 SYSCTL_CHILDREN(rack_sysctl_root),
623 OID_AUTO, "minto", CTLFLAG_RW,
625 "Minimum rack timeout in milliseconds");
626 SYSCTL_ADD_S32(&rack_sysctl_ctx,
627 SYSCTL_CHILDREN(rack_sysctl_root),
628 OID_AUTO, "earlyrecoveryseg", CTLFLAG_RW,
629 &rack_early_recovery_max_seg, 6,
630 "Max segments in early recovery");
631 SYSCTL_ADD_S32(&rack_sysctl_ctx,
632 SYSCTL_CHILDREN(rack_sysctl_root),
633 OID_AUTO, "earlyrecovery", CTLFLAG_RW,
634 &rack_early_recovery, 1,
635 "Do we do early recovery with rack");
636 SYSCTL_ADD_S32(&rack_sysctl_ctx,
637 SYSCTL_CHILDREN(rack_sysctl_root),
638 OID_AUTO, "reorder_thresh", CTLFLAG_RW,
639 &rack_reorder_thresh, 2,
640 "What factor for rack will be added when seeing reordering (shift right)");
641 SYSCTL_ADD_S32(&rack_sysctl_ctx,
642 SYSCTL_CHILDREN(rack_sysctl_root),
643 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
645 "what divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
646 SYSCTL_ADD_S32(&rack_sysctl_ctx,
647 SYSCTL_CHILDREN(rack_sysctl_root),
648 OID_AUTO, "reorder_fade", CTLFLAG_RW,
649 &rack_reorder_fade, 0,
650 "Does reorder detection fade, if so how many ms (0 means never)");
651 SYSCTL_ADD_S32(&rack_sysctl_ctx,
652 SYSCTL_CHILDREN(rack_sysctl_root),
653 OID_AUTO, "pktdelay", CTLFLAG_RW,
655 "Extra RACK time (in ms) besides reordering thresh");
656 SYSCTL_ADD_S32(&rack_sysctl_ctx,
657 SYSCTL_CHILDREN(rack_sysctl_root),
658 OID_AUTO, "inc_var", CTLFLAG_RW,
660 "Should rack add to the TLP timer the variance in rtt calculation");
661 rack_badfr = counter_u64_alloc(M_WAITOK);
662 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
663 SYSCTL_CHILDREN(rack_sysctl_root),
664 OID_AUTO, "badfr", CTLFLAG_RD,
665 &rack_badfr, "Total number of bad FRs");
666 rack_badfr_bytes = counter_u64_alloc(M_WAITOK);
667 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
668 SYSCTL_CHILDREN(rack_sysctl_root),
669 OID_AUTO, "badfr_bytes", CTLFLAG_RD,
670 &rack_badfr_bytes, "Total bytes of bad FRs");
671 rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK);
672 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
673 SYSCTL_CHILDREN(rack_sysctl_root),
674 OID_AUTO, "prrsndret", CTLFLAG_RD,
675 &rack_rtm_prr_retran,
676 "Total number of prr based retransmits");
677 rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK);
678 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
679 SYSCTL_CHILDREN(rack_sysctl_root),
680 OID_AUTO, "prrsndnew", CTLFLAG_RD,
681 &rack_rtm_prr_newdata,
682 "Total number of prr based new transmits");
683 rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK);
684 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
685 SYSCTL_CHILDREN(rack_sysctl_root),
686 OID_AUTO, "tsnf", CTLFLAG_RD,
687 &rack_timestamp_mismatch,
688 "Total number of timestamps that we could not find the reported ts");
689 rack_find_high = counter_u64_alloc(M_WAITOK);
690 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
691 SYSCTL_CHILDREN(rack_sysctl_root),
692 OID_AUTO, "findhigh", CTLFLAG_RD,
694 "Total number of FIN causing find-high");
695 rack_reorder_seen = counter_u64_alloc(M_WAITOK);
696 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
697 SYSCTL_CHILDREN(rack_sysctl_root),
698 OID_AUTO, "reordering", CTLFLAG_RD,
700 "Total number of times we added delay due to reordering");
701 rack_tlp_tot = counter_u64_alloc(M_WAITOK);
702 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
703 SYSCTL_CHILDREN(rack_sysctl_root),
704 OID_AUTO, "tlp_to_total", CTLFLAG_RD,
706 "Total number of tail loss probe expirations");
707 rack_tlp_newdata = counter_u64_alloc(M_WAITOK);
708 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
709 SYSCTL_CHILDREN(rack_sysctl_root),
710 OID_AUTO, "tlp_new", CTLFLAG_RD,
712 "Total number of tail loss probe sending new data");
714 rack_tlp_retran = counter_u64_alloc(M_WAITOK);
715 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
716 SYSCTL_CHILDREN(rack_sysctl_root),
717 OID_AUTO, "tlp_retran", CTLFLAG_RD,
719 "Total number of tail loss probe sending retransmitted data");
720 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK);
721 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
722 SYSCTL_CHILDREN(rack_sysctl_root),
723 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD,
724 &rack_tlp_retran_bytes,
725 "Total bytes of tail loss probe sending retransmitted data");
726 rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK);
727 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
728 SYSCTL_CHILDREN(rack_sysctl_root),
729 OID_AUTO, "tlp_retran_fail", CTLFLAG_RD,
730 &rack_tlp_retran_fail,
731 "Total number of tail loss probe sending retransmitted data that failed (wait for t3)");
732 rack_to_tot = counter_u64_alloc(M_WAITOK);
733 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
734 SYSCTL_CHILDREN(rack_sysctl_root),
735 OID_AUTO, "rack_to_tot", CTLFLAG_RD,
737 "Total number of times the rack to expired?");
738 rack_to_arm_rack = counter_u64_alloc(M_WAITOK);
739 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
740 SYSCTL_CHILDREN(rack_sysctl_root),
741 OID_AUTO, "arm_rack", CTLFLAG_RD,
743 "Total number of times the rack timer armed?");
744 rack_to_arm_tlp = counter_u64_alloc(M_WAITOK);
745 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
746 SYSCTL_CHILDREN(rack_sysctl_root),
747 OID_AUTO, "arm_tlp", CTLFLAG_RD,
749 "Total number of times the tlp timer armed?");
750 rack_paced_segments = counter_u64_alloc(M_WAITOK);
751 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
752 SYSCTL_CHILDREN(rack_sysctl_root),
753 OID_AUTO, "paced", CTLFLAG_RD,
754 &rack_paced_segments,
755 "Total number of times a segment send caused hptsi");
756 rack_unpaced_segments = counter_u64_alloc(M_WAITOK);
757 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
758 SYSCTL_CHILDREN(rack_sysctl_root),
759 OID_AUTO, "unpaced", CTLFLAG_RD,
760 &rack_unpaced_segments,
761 "Total number of times a segment did not cause hptsi");
762 rack_saw_enobuf = counter_u64_alloc(M_WAITOK);
763 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
764 SYSCTL_CHILDREN(rack_sysctl_root),
765 OID_AUTO, "saw_enobufs", CTLFLAG_RD,
767 "Total number of times a segment did not cause hptsi");
768 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK);
769 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
770 SYSCTL_CHILDREN(rack_sysctl_root),
771 OID_AUTO, "saw_enetunreach", CTLFLAG_RD,
772 &rack_saw_enetunreach,
773 "Total number of times a segment did not cause hptsi");
774 rack_to_alloc = counter_u64_alloc(M_WAITOK);
775 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
776 SYSCTL_CHILDREN(rack_sysctl_root),
777 OID_AUTO, "allocs", CTLFLAG_RD,
779 "Total allocations of tracking structures");
780 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK);
781 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
782 SYSCTL_CHILDREN(rack_sysctl_root),
783 OID_AUTO, "allochard", CTLFLAG_RD,
785 "Total allocations done with sleeping the hard way");
786 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK);
787 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
788 SYSCTL_CHILDREN(rack_sysctl_root),
789 OID_AUTO, "allocemerg", CTLFLAG_RD,
790 &rack_to_alloc_emerg,
791 "Total allocations done from emergency cache");
792 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK);
793 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
794 SYSCTL_CHILDREN(rack_sysctl_root),
795 OID_AUTO, "alloc_limited", CTLFLAG_RD,
796 &rack_to_alloc_limited,
797 "Total allocations dropped due to limit");
798 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK);
799 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
800 SYSCTL_CHILDREN(rack_sysctl_root),
801 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD,
802 &rack_alloc_limited_conns,
803 "Connections with allocations dropped due to limit");
804 rack_split_limited = counter_u64_alloc(M_WAITOK);
805 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
806 SYSCTL_CHILDREN(rack_sysctl_root),
807 OID_AUTO, "split_limited", CTLFLAG_RD,
809 "Split allocations dropped due to limit");
810 rack_sack_proc_all = counter_u64_alloc(M_WAITOK);
811 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
812 SYSCTL_CHILDREN(rack_sysctl_root),
813 OID_AUTO, "sack_long", CTLFLAG_RD,
815 "Total times we had to walk whole list for sack processing");
817 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK);
818 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
819 SYSCTL_CHILDREN(rack_sysctl_root),
820 OID_AUTO, "sack_restart", CTLFLAG_RD,
821 &rack_sack_proc_restart,
822 "Total times we had to walk whole list due to a restart");
823 rack_sack_proc_short = counter_u64_alloc(M_WAITOK);
824 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
825 SYSCTL_CHILDREN(rack_sysctl_root),
826 OID_AUTO, "sack_short", CTLFLAG_RD,
827 &rack_sack_proc_short,
828 "Total times we took shortcut for sack processing");
829 rack_enter_tlp_calc = counter_u64_alloc(M_WAITOK);
830 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
831 SYSCTL_CHILDREN(rack_sysctl_root),
832 OID_AUTO, "tlp_calc_entered", CTLFLAG_RD,
833 &rack_enter_tlp_calc,
834 "Total times we called calc-tlp");
835 rack_used_tlpmethod = counter_u64_alloc(M_WAITOK);
836 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
837 SYSCTL_CHILDREN(rack_sysctl_root),
838 OID_AUTO, "hit_tlp_method", CTLFLAG_RD,
839 &rack_used_tlpmethod,
840 "Total number of runt sacks");
841 rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK);
842 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
843 SYSCTL_CHILDREN(rack_sysctl_root),
844 OID_AUTO, "hit_tlp_method2", CTLFLAG_RD,
845 &rack_used_tlpmethod2,
846 "Total number of runt sacks 2");
847 rack_runt_sacks = counter_u64_alloc(M_WAITOK);
848 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
849 SYSCTL_CHILDREN(rack_sysctl_root),
850 OID_AUTO, "runtsacks", CTLFLAG_RD,
852 "Total number of runt sacks");
853 rack_progress_drops = counter_u64_alloc(M_WAITOK);
854 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
855 SYSCTL_CHILDREN(rack_sysctl_root),
856 OID_AUTO, "prog_drops", CTLFLAG_RD,
857 &rack_progress_drops,
858 "Total number of progress drops");
859 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK);
860 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
861 SYSCTL_CHILDREN(rack_sysctl_root),
862 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD,
863 &rack_input_idle_reduces,
864 "Total number of idle reductions on input");
865 rack_tlp_does_nada = counter_u64_alloc(M_WAITOK);
866 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
867 SYSCTL_CHILDREN(rack_sysctl_root),
868 OID_AUTO, "tlp_nada", CTLFLAG_RD,
870 "Total number of nada tlp calls");
871 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK);
872 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
873 OID_AUTO, "outsize", CTLFLAG_RD,
874 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes");
875 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK);
876 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
877 OID_AUTO, "opts", CTLFLAG_RD,
878 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats");
879 SYSCTL_ADD_PROC(&rack_sysctl_ctx,
880 SYSCTL_CHILDREN(rack_sysctl_root),
881 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
882 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters");
883 }
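/*
 * Editor's illustration: reading one of the counters exported above
 * from userland. Assumes the stack's sysctl root was registered as
 * net.inet.tcp.rack; the actual node name depends on how the module
 * was attached.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	uint64_t tlp_tot;
	size_t len = sizeof(tlp_tot);

	if (sysctlbyname("net.inet.tcp.rack.tlp_to_total", &tlp_tot,
	    &len, NULL, 0) == -1)
		return (1);
	printf("TLP expirations: %llu\n", (unsigned long long)tlp_tot);
	return (0);
}
#endif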
885 static inline int32_t
886 rack_progress_timeout_check(struct tcpcb *tp)
887 {
888 #ifdef NETFLIX_PROGRESS
889 if (tp->t_maxunacktime && tp->t_acktime && TSTMP_GT(ticks, tp->t_acktime)) {
890 if ((ticks - tp->t_acktime) >= tp->t_maxunacktime) {
892 * There is an assumption that the caller
893 * will drop the connection so we will
894 * increment the counters here.
896 struct tcp_rack *rack;
897 rack = (struct tcp_rack *)tp->t_fb_ptr;
898 counter_u64_add(rack_progress_drops, 1);
899 TCPSTAT_INC(tcps_progdrops);
900 rack_log_progress_event(rack, tp, ticks, PROGRESS_DROP, __LINE__);
901 return (1);
902 }
903 }
904 #endif
905 return (0);
906 }
908 static void
910 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
912 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
913 union tcp_log_stackspecific log;
915 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
916 log.u_bbr.flex1 = TICKS_2_MSEC(rack->rc_tp->t_srtt >> TCP_RTT_SHIFT);
917 log.u_bbr.flex2 = to;
918 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
919 log.u_bbr.flex4 = slot;
920 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot;
921 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
922 log.u_bbr.flex8 = which;
923 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
924 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
925 TCP_LOG_EVENT(rack->rc_tp, NULL,
926 &rack->rc_inp->inp_socket->so_rcv,
927 &rack->rc_inp->inp_socket->so_snd,
928 BBR_LOG_TIMERSTAR, 0,
934 rack_log_to_event(struct tcp_rack *rack, int32_t to_num)
936 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
937 union tcp_log_stackspecific log;
939 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
940 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
941 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
942 log.u_bbr.flex8 = to_num;
943 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt;
944 log.u_bbr.flex2 = rack->rc_rack_rtt;
945 TCP_LOG_EVENT(rack->rc_tp, NULL,
946 &rack->rc_inp->inp_socket->so_rcv,
947 &rack->rc_inp->inp_socket->so_snd,
954 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, int32_t t,
955 uint32_t o_srtt, uint32_t o_var)
957 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
958 union tcp_log_stackspecific log;
960 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
961 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
962 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
963 log.u_bbr.flex1 = t;
964 log.u_bbr.flex2 = o_srtt;
965 log.u_bbr.flex3 = o_var;
966 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest;
967 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest;
968 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_rtt_cnt;
969 log.u_bbr.rttProp = rack->r_ctl.rack_rs.rs_rtt_tot;
970 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method;
971 TCP_LOG_EVENT(tp, NULL,
972 &rack->rc_inp->inp_socket->so_rcv,
973 &rack->rc_inp->inp_socket->so_snd,
980 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt)
982 /*
983 * Log the rtt sample we are
984 * applying to the srtt algorithm in
985 * useconds.
986 */
987 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
988 union tcp_log_stackspecific log;
989 struct timeval tv;
991 memset(&log, 0, sizeof(log));
992 /* Convert our ms to a microsecond */
993 log.u_bbr.flex1 = rtt * 1000;
994 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
995 TCP_LOG_EVENTP(rack->rc_tp, NULL,
996 &rack->rc_inp->inp_socket->so_rcv,
997 &rack->rc_inp->inp_socket->so_snd,
999 0, &log, false, &tv);
1005 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line)
1007 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
1008 union tcp_log_stackspecific log;
1010 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1011 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1012 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1013 log.u_bbr.flex1 = line;
1014 log.u_bbr.flex2 = tick;
1015 log.u_bbr.flex3 = tp->t_maxunacktime;
1016 log.u_bbr.flex4 = tp->t_acktime;
1017 log.u_bbr.flex8 = event;
1018 TCP_LOG_EVENT(tp, NULL,
1019 &rack->rc_inp->inp_socket->so_rcv,
1020 &rack->rc_inp->inp_socket->so_snd,
1021 BBR_LOG_PROGRESS, 0,
1027 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts)
1029 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1030 union tcp_log_stackspecific log;
1032 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1033 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1034 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1035 log.u_bbr.flex1 = slot;
1036 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags);
1037 log.u_bbr.flex8 = rack->rc_in_persist;
1038 TCP_LOG_EVENT(rack->rc_tp, NULL,
1039 &rack->rc_inp->inp_socket->so_rcv,
1040 &rack->rc_inp->inp_socket->so_snd,
1047 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out)
1049 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1050 union tcp_log_stackspecific log;
1052 memset(&log, 0, sizeof(log));
1053 log.u_bbr.flex1 = did_out;
1054 log.u_bbr.flex2 = nxt_pkt;
1055 log.u_bbr.flex3 = way_out;
1056 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
1057 log.u_bbr.flex7 = rack->r_wanted_output;
1058 log.u_bbr.flex8 = rack->rc_in_persist;
1059 TCP_LOG_EVENT(rack->rc_tp, NULL,
1060 &rack->rc_inp->inp_socket->so_rcv,
1061 &rack->rc_inp->inp_socket->so_snd,
1062 BBR_LOG_DOSEG_DONE, 0,
1069 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, uint8_t hpts_calling)
1071 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1072 union tcp_log_stackspecific log;
1074 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1075 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1076 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1077 log.u_bbr.flex1 = slot;
1078 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
1079 log.u_bbr.flex7 = hpts_calling;
1080 log.u_bbr.flex8 = rack->rc_in_persist;
1081 TCP_LOG_EVENT(rack->rc_tp, NULL,
1082 &rack->rc_inp->inp_socket->so_rcv,
1083 &rack->rc_inp->inp_socket->so_snd,
1090 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line)
1092 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1093 union tcp_log_stackspecific log;
1095 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1096 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1097 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1098 log.u_bbr.flex1 = line;
1099 log.u_bbr.flex2 = 0;
1100 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
1101 log.u_bbr.flex4 = 0;
1102 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
1103 log.u_bbr.flex8 = hpts_removed;
1104 TCP_LOG_EVENT(rack->rc_tp, NULL,
1105 &rack->rc_inp->inp_socket->so_rcv,
1106 &rack->rc_inp->inp_socket->so_snd,
1107 BBR_LOG_TIMERCANC, 0,
1113 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers)
1115 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1116 union tcp_log_stackspecific log;
1118 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1119 log.u_bbr.flex1 = timers;
1120 log.u_bbr.flex2 = ret;
1121 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp;
1122 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
1123 log.u_bbr.flex5 = cts;
1124 TCP_LOG_EVENT(rack->rc_tp, NULL,
1125 &rack->rc_inp->inp_socket->so_rcv,
1126 &rack->rc_inp->inp_socket->so_snd,
1127 BBR_LOG_TO_PROCESS, 0,
1133 rack_counter_destroy(void)
1135 counter_u64_free(rack_badfr);
1136 counter_u64_free(rack_badfr_bytes);
1137 counter_u64_free(rack_rtm_prr_retran);
1138 counter_u64_free(rack_rtm_prr_newdata);
1139 counter_u64_free(rack_timestamp_mismatch);
1140 counter_u64_free(rack_reorder_seen);
1141 counter_u64_free(rack_tlp_tot);
1142 counter_u64_free(rack_tlp_newdata);
1143 counter_u64_free(rack_tlp_retran);
1144 counter_u64_free(rack_tlp_retran_bytes);
1145 counter_u64_free(rack_tlp_retran_fail);
1146 counter_u64_free(rack_to_tot);
1147 counter_u64_free(rack_to_arm_rack);
1148 counter_u64_free(rack_to_arm_tlp);
1149 counter_u64_free(rack_paced_segments);
1150 counter_u64_free(rack_unpaced_segments);
1151 counter_u64_free(rack_saw_enobuf);
1152 counter_u64_free(rack_saw_enetunreach);
1153 counter_u64_free(rack_to_alloc_hard);
1154 counter_u64_free(rack_to_alloc_emerg);
1155 counter_u64_free(rack_sack_proc_all);
1156 counter_u64_free(rack_sack_proc_short);
1157 counter_u64_free(rack_sack_proc_restart);
1158 counter_u64_free(rack_to_alloc);
1159 counter_u64_free(rack_to_alloc_limited);
1160 counter_u64_free(rack_split_limited);
1161 counter_u64_free(rack_find_high);
1162 counter_u64_free(rack_runt_sacks);
1163 counter_u64_free(rack_enter_tlp_calc);
1164 counter_u64_free(rack_used_tlpmethod);
1165 counter_u64_free(rack_used_tlpmethod2);
1166 counter_u64_free(rack_progress_drops);
1167 counter_u64_free(rack_input_idle_reduces);
1168 counter_u64_free(rack_tlp_does_nada);
1169 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE);
1170 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE);
1173 static struct rack_sendmap *
1174 rack_alloc(struct tcp_rack *rack)
1176 struct rack_sendmap *rsm;
1178 rsm = uma_zalloc(rack_zone, M_NOWAIT);
1179 if (rsm) {
1180 rack->r_ctl.rc_num_maps_alloced++;
1181 counter_u64_add(rack_to_alloc, 1);
1182 return (rsm);
1183 }
1184 if (rack->rc_free_cnt) {
1185 counter_u64_add(rack_to_alloc_emerg, 1);
1186 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
1187 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_next);
1188 rack->rc_free_cnt--;
1189 return (rsm);
1190 }
1191 return (NULL);
1192 }
1194 static struct rack_sendmap *
1195 rack_alloc_full_limit(struct tcp_rack *rack)
1196 {
1197 if ((rack_map_entries_limit > 0) &&
1198 (rack->r_ctl.rc_num_maps_alloced >= rack_map_entries_limit)) {
1199 counter_u64_add(rack_to_alloc_limited, 1);
1200 if (!rack->alloc_limit_reported) {
1201 rack->alloc_limit_reported = 1;
1202 counter_u64_add(rack_alloc_limited_conns, 1);
1203 }
1204 return (NULL);
1205 }
1206 return (rack_alloc(rack));
1207 }
1209 /* wrapper to allocate a sendmap entry, subject to a specific limit */
1210 static struct rack_sendmap *
1211 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type)
1213 struct rack_sendmap *rsm;
1215 if (limit_type) {
1216 /* currently there is only one limit type */
1217 if (rack_map_split_limit > 0 &&
1218 rack->r_ctl.rc_num_split_allocs >= rack_map_split_limit) {
1219 counter_u64_add(rack_split_limited, 1);
1220 if (!rack->alloc_limit_reported) {
1221 rack->alloc_limit_reported = 1;
1222 counter_u64_add(rack_alloc_limited_conns, 1);
1223 }
1224 return (NULL);
1225 }
1226 }
1228 /* allocate and mark in the limit type, if set */
1229 rsm = rack_alloc(rack);
1230 if (rsm != NULL && limit_type) {
1231 rsm->r_limit_type = limit_type;
1232 rack->r_ctl.rc_num_split_allocs++;
1233 }
1234 return (rsm);
1235 }
1237 static void
1238 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
1240 if (rsm->r_limit_type) {
1241 /* currently there is only one limit type */
1242 rack->r_ctl.rc_num_split_allocs--;
1243 }
1244 if (rack->r_ctl.rc_tlpsend == rsm)
1245 rack->r_ctl.rc_tlpsend = NULL;
1246 if (rack->r_ctl.rc_next == rsm)
1247 rack->r_ctl.rc_next = NULL;
1248 if (rack->r_ctl.rc_sacklast == rsm)
1249 rack->r_ctl.rc_sacklast = NULL;
1250 if (rack->rc_free_cnt < rack_free_cache) {
1251 memset(rsm, 0, sizeof(struct rack_sendmap));
1252 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_next);
1253 rsm->r_limit_type = 0;
1254 rack->rc_free_cnt++;
1255 } else {
1257 rack->r_ctl.rc_num_maps_alloced--;
1258 uma_zfree(rack_zone, rsm);
1259 }
1260 }
1261 /*
1262 * CC wrapper hook functions
1263 */
1264 static void
1265 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, struct tcphdr *th, uint16_t nsegs,
1266 uint16_t type, int32_t recovery)
1267 {
1268 #ifdef NETFLIX_STATS
1269 int32_t gput;
1270 #endif
1272 INP_WLOCK_ASSERT(tp->t_inpcb);
1274 tp->ccv->nsegs = nsegs;
1275 tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
1276 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) {
1277 uint32_t max;
1279 max = rack->r_ctl.rc_early_recovery_segs * tp->t_maxseg;
1280 if (tp->ccv->bytes_this_ack > max) {
1281 tp->ccv->bytes_this_ack = max;
1284 if (tp->snd_cwnd <= tp->snd_wnd)
1285 tp->ccv->flags |= CCF_CWND_LIMITED;
1286 else
1287 tp->ccv->flags &= ~CCF_CWND_LIMITED;
1289 if (type == CC_ACK) {
1290 #ifdef NETFLIX_STATS
1291 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
1292 ((int32_t) tp->snd_cwnd) - tp->snd_wnd);
1293 if ((tp->t_flags & TF_GPUTINPROG) &&
1294 SEQ_GEQ(th->th_ack, tp->gput_ack)) {
1295 gput = (((int64_t) (th->th_ack - tp->gput_seq)) << 3) /
1296 max(1, tcp_ts_getticks() - tp->gput_ts);
1297 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
1298 gput);
1299 /*
1300 * XXXLAS: This is a temporary hack, and should be
1301 * chained off VOI_TCP_GPUT when stats(9) grows an
1302 * API to deal with chained VOIs.
1303 */
1304 if (tp->t_stats_gput_prev > 0)
1305 stats_voi_update_abs_s32(tp->t_stats,
1307 ((gput - tp->t_stats_gput_prev) * 100) /
1308 tp->t_stats_gput_prev);
1309 tp->t_flags &= ~TF_GPUTINPROG;
1310 tp->t_stats_gput_prev = gput;
1311 if (tp->t_maxpeakrate) {
1313 * We update t_peakrate_thr. This gives us roughly
1314 * one update per round trip time.
1316 tcp_update_peakrate_thr(tp);
1317 }
1318 }
1319 #endif
1320 if (tp->snd_cwnd > tp->snd_ssthresh) {
1321 tp->t_bytes_acked += tp->ccv->bytes_this_ack;
1322 if (tp->t_bytes_acked >= tp->snd_cwnd) {
1323 tp->t_bytes_acked -= tp->snd_cwnd;
1324 tp->ccv->flags |= CCF_ABC_SENTAWND;
1325 }
1326 } else {
1327 tp->ccv->flags &= ~CCF_ABC_SENTAWND;
1328 tp->t_bytes_acked = 0;
1331 if (CC_ALGO(tp)->ack_received != NULL) {
1332 /* XXXLAS: Find a way to live without this */
1333 tp->ccv->curack = th->th_ack;
1334 CC_ALGO(tp)->ack_received(tp->ccv, type);
1336 #ifdef NETFLIX_STATS
1337 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, tp->snd_cwnd);
1338 #endif
1339 if (rack->r_ctl.rc_rack_largest_cwnd < tp->snd_cwnd) {
1340 rack->r_ctl.rc_rack_largest_cwnd = tp->snd_cwnd;
1342 /* we enforce max peak rate if it is set. */
1343 if (tp->t_peakrate_thr && tp->snd_cwnd > tp->t_peakrate_thr) {
1344 tp->snd_cwnd = tp->t_peakrate_thr;
1349 tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th)
1351 struct tcp_rack *rack;
1353 rack = (struct tcp_rack *)tp->t_fb_ptr;
1354 INP_WLOCK_ASSERT(tp->t_inpcb);
1355 if (rack->r_ctl.rc_prr_sndcnt > 0)
1356 rack->r_wanted_output++;
1360 rack_post_recovery(struct tcpcb *tp, struct tcphdr *th)
1362 struct tcp_rack *rack;
1364 INP_WLOCK_ASSERT(tp->t_inpcb);
1365 rack = (struct tcp_rack *)tp->t_fb_ptr;
1366 if (CC_ALGO(tp)->post_recovery != NULL) {
1367 tp->ccv->curack = th->th_ack;
1368 CC_ALGO(tp)->post_recovery(tp->ccv);
1369 }
1370 /*
1371 * Here we can in theory adjust cwnd to be based on the number of
1372 * losses in the window (rack->r_ctl.rc_loss_count). This is done
1373 * based on the rack_use_proportional flag.
1374 */
1375 if (rack->r_ctl.rc_prop_reduce && rack->r_ctl.rc_prop_rate) {
1376 int32_t reduce;
1378 reduce = (rack->r_ctl.rc_loss_count * rack->r_ctl.rc_prop_rate);
1379 if (reduce > 50) {
1380 reduce = 50;
1381 }
1382 tp->snd_cwnd -= ((reduce * tp->snd_cwnd) / 100);
1383 }
1384 if (tp->snd_cwnd > tp->snd_ssthresh) {
1385 /* Drop us down to the ssthresh (1/2 cwnd at loss) */
1386 tp->snd_cwnd = tp->snd_ssthresh;
1389 if (rack->r_ctl.rc_prr_sndcnt > 0) {
1390 /* Suck the next prr cnt back into cwnd */
1391 tp->snd_cwnd += rack->r_ctl.rc_prr_sndcnt;
1392 rack->r_ctl.rc_prr_sndcnt = 0;
1394 tp->snd_recover = tp->snd_una;
1395 EXIT_RECOVERY(tp->t_flags);
1399 rack_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
1401 struct tcp_rack *rack;
1403 INP_WLOCK_ASSERT(tp->t_inpcb);
1405 rack = (struct tcp_rack *)tp->t_fb_ptr;
1406 switch (type) {
1407 case CC_NDUPACK:
1408 /* rack->r_ctl.rc_ssthresh_set = 1;*/
1409 if (!IN_FASTRECOVERY(tp->t_flags)) {
1410 rack->r_ctl.rc_tlp_rtx_out = 0;
1411 rack->r_ctl.rc_prr_delivered = 0;
1412 rack->r_ctl.rc_prr_out = 0;
1413 rack->r_ctl.rc_loss_count = 0;
1414 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
1415 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una;
1416 tp->snd_recover = tp->snd_max;
1417 if (tp->t_flags & TF_ECN_PERMIT)
1418 tp->t_flags |= TF_ECN_SND_CWR;
1419 }
1420 break;
1421 case CC_ECN:
1422 if (!IN_CONGRECOVERY(tp->t_flags) ||
1423 /*
1424 * Allow ECN reaction on ACK to CWR, if
1425 * that data segment was also CE marked.
1426 */
1427 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
1428 EXIT_CONGRECOVERY(tp->t_flags);
1429 TCPSTAT_INC(tcps_ecn_rcwnd);
1430 tp->snd_recover = tp->snd_max + 1;
1431 if (tp->t_flags & TF_ECN_PERMIT)
1432 tp->t_flags |= TF_ECN_SND_CWR;
1433 }
1434 break;
1435 case CC_RTO:
1437 tp->t_bytes_acked = 0;
1438 EXIT_RECOVERY(tp->t_flags);
1439 tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
1440 tp->t_maxseg) * tp->t_maxseg;
1441 tp->snd_cwnd = tp->t_maxseg;
1442 if (tp->t_flags & TF_ECN_PERMIT)
1443 tp->t_flags |= TF_ECN_SND_CWR;
1444 break;
1445 case CC_RTO_ERR:
1446 TCPSTAT_INC(tcps_sndrexmitbad);
1447 /* RTO was unnecessary, so reset everything. */
1448 tp->snd_cwnd = tp->snd_cwnd_prev;
1449 tp->snd_ssthresh = tp->snd_ssthresh_prev;
1450 tp->snd_recover = tp->snd_recover_prev;
1451 if (tp->t_flags & TF_WASFRECOVERY)
1452 ENTER_FASTRECOVERY(tp->t_flags);
1453 if (tp->t_flags & TF_WASCRECOVERY)
1454 ENTER_CONGRECOVERY(tp->t_flags);
1455 tp->snd_nxt = tp->snd_max;
1456 tp->t_badrxtwin = 0;
1457 break;
1458 }
1460 if (CC_ALGO(tp)->cong_signal != NULL) {
1462 tp->ccv->curack = th->th_ack;
1463 CC_ALGO(tp)->cong_signal(tp->ccv, type);
1470 rack_cc_after_idle(struct tcpcb *tp, int reduce_largest)
1471 {
1472 uint32_t i_cwnd;
1474 INP_WLOCK_ASSERT(tp->t_inpcb);
1476 #ifdef NETFLIX_STATS
1477 TCPSTAT_INC(tcps_idle_restarts);
1478 if (tp->t_state == TCPS_ESTABLISHED)
1479 TCPSTAT_INC(tcps_idle_estrestarts);
1480 #endif
1481 if (CC_ALGO(tp)->after_idle != NULL)
1482 CC_ALGO(tp)->after_idle(tp->ccv);
1484 if (tp->snd_cwnd == 1)
1485 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */
1486 else if (V_tcp_initcwnd_segments)
1487 i_cwnd = min((V_tcp_initcwnd_segments * tp->t_maxseg),
1488 max(2 * tp->t_maxseg, V_tcp_initcwnd_segments * 1460));
1489 else if (V_tcp_do_rfc3390)
1490 i_cwnd = min(4 * tp->t_maxseg,
1491 max(2 * tp->t_maxseg, 4380));
1492 else {
1493 /* Per RFC5681 Section 3.1 */
1494 if (tp->t_maxseg > 2190)
1495 i_cwnd = 2 * tp->t_maxseg;
1496 else if (tp->t_maxseg > 1095)
1497 i_cwnd = 3 * tp->t_maxseg;
1498 else
1499 i_cwnd = 4 * tp->t_maxseg;
1500 }
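/*
 * Editor's example of the RFC5681 3.1 table above: a common maxseg of
 * 1460 falls in the (1095, 2190] bucket, so i_cwnd = 3 * 1460 = 4380
 * bytes.
 */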
1501 if (reduce_largest) {
1503 * Do we reduce the largest cwnd to make
1504 * rack play nice on restart hptsi wise?
1506 if (((struct tcp_rack *)tp->t_fb_ptr)->r_ctl.rc_rack_largest_cwnd > i_cwnd)
1507 ((struct tcp_rack *)tp->t_fb_ptr)->r_ctl.rc_rack_largest_cwnd = i_cwnd;
1508 }
1509 /*
1510 * Being idle is no different than the initial window. If the cc
1511 * clamps it down below the initial window, raise it to the initial
1512 * window.
1513 */
1514 if (tp->snd_cwnd < i_cwnd) {
1515 tp->snd_cwnd = i_cwnd;
1516 }
1517 }
1520 /*
1521 * Indicate whether this ack should be delayed. We can delay the ack if
1522 * following conditions are met:
1523 * - There is no delayed ack timer in progress.
1524 * - Our last ack wasn't a 0-sized window. We never want to delay
1525 * the ack that opens up a 0-sized window.
1526 * - LRO wasn't used for this segment. We make sure by checking that the
1527 * segment size is not larger than the MSS.
1528 * - Delayed acks are enabled or this is a half-synchronized T/TCP
1529 * connection.
1530 */
1531 #define DELAY_ACK(tp, tlen) \
1532 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \
1533 ((tp->t_flags & TF_DELACK) == 0) && \
1534 (tlen <= tp->t_maxseg) && \
1535 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
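/*
 * Editor's illustration of how the DELAY_ACK predicate is typically
 * consumed on the input path (hypothetical fragment; the real call
 * sites are in the per-state do_segment handlers):
 */
#if 0
	if (DELAY_ACK(tp, tlen)) {
		tp->t_flags |= TF_DELACK;
	} else {
		rack->r_wanted_output++;
		tp->t_flags |= TF_ACKNOW;
	}
#endif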
1537 static void
1538 rack_calc_rwin(struct socket *so, struct tcpcb *tp)
1539 {
1540 int32_t win;
1542 /*
1543 * Calculate amount of space in receive window, and then do TCP
1544 * input processing. Receive window is amount of space in rcv queue,
1545 * but not less than advertised window.
1546 */
1547 win = sbspace(&so->so_rcv);
1548 if (win < 0)
1549 win = 0;
1550 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
1551 }
1553 static void
1554 rack_do_drop(struct mbuf *m, struct tcpcb *tp)
1555 {
1556 /*
1557 * Drop space held by incoming segment and return.
1558 */
1559 if (tp != NULL)
1560 INP_WUNLOCK(tp->t_inpcb);
1561 if (m)
1562 m_freem(m);
1563 }
1565 static void
1566 rack_do_dropwithreset(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th, int32_t rstreason, int32_t tlen)
1567 {
1568 if (tp != NULL) {
1569 tcp_dropwithreset(m, th, tp, tlen, rstreason);
1570 INP_WUNLOCK(tp->t_inpcb);
1571 } else
1572 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
1573 }
1576 * The value in ret_val informs the caller
1577 * if we dropped the tcb (and lock) or not.
1578 * 1 = we dropped it, 0 = the TCB is still locked
1579 */
1581 static void
1582 rack_do_dropafterack(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th, int32_t thflags, int32_t tlen, int32_t * ret_val)
1583 {
1584 /*
1585 * Generate an ACK dropping incoming segment if it occupies sequence
1586 * space, where the ACK reflects our state.
1588 * We can now skip the test for the RST flag since all paths to this
1589 * code happen after packets containing RST have been dropped.
1591 * In the SYN-RECEIVED state, don't send an ACK unless the segment
1592 * we received passes the SYN-RECEIVED ACK test. If it fails send a
1593 * RST. This breaks the loop in the "LAND" DoS attack, and also
1594 * prevents an ACK storm between two listening ports that have been
1595 * sent forged SYN segments, each with the source address of the
1596 * other.
1597 */
1598 struct tcp_rack *rack;
1600 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
1601 (SEQ_GT(tp->snd_una, th->th_ack) ||
1602 SEQ_GT(th->th_ack, tp->snd_max))) {
1603 *ret_val = 1;
1604 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
1605 return;
1606 } else
1607 *ret_val = 0;
1608 rack = (struct tcp_rack *)tp->t_fb_ptr;
1609 rack->r_wanted_output++;
1610 tp->t_flags |= TF_ACKNOW;
1611 if (m)
1612 m_freem(m);
1613 }
1615 static int
1617 rack_process_rst(struct mbuf *m, struct tcphdr *th, struct socket *so, struct tcpcb *tp)
1618 {
1619 /*
1620 * RFC5961 Section 3.2
1622 * - RST drops connection only if SEG.SEQ == RCV.NXT. - If RST is in
1623 * window, we send challenge ACK.
1625 * Note: to take into account delayed ACKs, we should test against
1626 * last_ack_sent instead of rcv_nxt. Note 2: we handle special case
1627 * of closed window, not covered by the RFC.
1628 */
1629 int dropped = 0;
1631 if ((SEQ_GEQ(th->th_seq, (tp->last_ack_sent - 1)) &&
1632 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
1633 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) {
1635 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1636 KASSERT(tp->t_state != TCPS_SYN_SENT,
1637 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p",
1640 if (V_tcp_insecure_rst ||
1641 (tp->last_ack_sent == th->th_seq) ||
1642 (tp->rcv_nxt == th->th_seq) ||
1643 ((tp->last_ack_sent - 1) == th->th_seq)) {
1644 TCPSTAT_INC(tcps_drops);
1645 /* Drop the connection. */
1646 switch (tp->t_state) {
1647 case TCPS_SYN_RECEIVED:
1648 so->so_error = ECONNREFUSED;
1649 goto close;
1650 case TCPS_ESTABLISHED:
1651 case TCPS_FIN_WAIT_1:
1652 case TCPS_FIN_WAIT_2:
1653 case TCPS_CLOSE_WAIT:
1654 case TCPS_CLOSING:
1655 case TCPS_LAST_ACK:
1656 so->so_error = ECONNRESET;
1657 close:
1658 tcp_state_change(tp, TCPS_CLOSED);
1659 /* FALLTHROUGH */
1660 default:
1661 tp = tcp_drop(tp, 0);
1662 dropped = 1;
1663 }
1664 rack_do_drop(m, tp);
1665 } else {
1666 TCPSTAT_INC(tcps_badrst);
1667 /* Send challenge ACK. */
1668 tcp_respond(tp, mtod(m, void *), th, m,
1669 tp->rcv_nxt, tp->snd_nxt, TH_ACK);
1670 tp->last_ack_sent = tp->rcv_nxt;
1671 rack_do_drop(m, NULL);
1672 }
1673 } else
1674 rack_do_drop(m, NULL);
1675 return (dropped);
1676 }
1678 /*
1679 * The value in ret_val informs the caller
1680 * if we dropped the tcb (and lock) or not.
1681 * 1 = we dropped it, 0 = the TCB is still locked
1682 */
1685 rack_challenge_ack(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t * ret_val)
1687 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1689 TCPSTAT_INC(tcps_badsyn);
1690 if (V_tcp_insecure_syn &&
1691 SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
1692 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
1693 tp = tcp_drop(tp, ECONNRESET);
1694 *ret_val = 1;
1695 rack_do_drop(m, tp);
1696 } else {
1697 /* Send challenge ACK. */
1698 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt,
1699 tp->snd_nxt, TH_ACK);
1700 tp->last_ack_sent = tp->rcv_nxt;
1701 m = NULL;
1702 *ret_val = 0;
1703 rack_do_drop(m, NULL);
1704 }
1705 }
1708 * rack_ts_check returns 1 for you should not proceed. It places
1709 * in ret_val what should be returned 1/0 by the caller. The 1 indicates
1710 * that the TCB is unlocked and probably dropped. The 0 indicates the
1711 * TCB is still valid and locked.
1712 */
1714 rack_ts_check(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t tlen, int32_t thflags, int32_t * ret_val)
1715 {
1717 /* Check to see if ts_recent is over 24 days old. */
1718 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
1719 /*
1720 * Invalidate ts_recent. If this segment updates ts_recent,
1721 * the age will be reset later and ts_recent will get a
1722 * valid value. If it does not, setting ts_recent to zero
1723 * will at least satisfy the requirement that zero be placed
1724 * in the timestamp echo reply when ts_recent isn't valid.
1725 * The age isn't reset until we get a valid ts_recent
1726 * because we don't want out-of-order segments to be dropped
1727 * when ts_recent is old.
1731 TCPSTAT_INC(tcps_rcvduppack);
1732 TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
1733 TCPSTAT_INC(tcps_pawsdrop);
1736 rack_do_dropafterack(m, tp, th, thflags, tlen, ret_val);
1738 rack_do_drop(m, NULL);
1746 * rack_drop_checks returns 1 to indicate you should not proceed. It places
1747 * in ret_val what the caller should return (1/0). A 1 indicates
1748 * that the TCB is unlocked and probably dropped; a 0 indicates the
1749 * TCB is still valid and locked.
1752 rack_drop_checks(struct tcpopt *to, struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t * tlenp, int32_t * thf, int32_t * drop_hdrlen, int32_t * ret_val)
1760 todrop = tp->rcv_nxt - th->th_seq;
1762 if (thflags & TH_SYN) {
1772 * Following if statement from Stevens, vol. 2, p. 960.
1775 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
1777 * Any valid FIN must be to the left of the window.
1778 * At this point the FIN must be a duplicate or out
1779 * of sequence; drop it.
1783 * Send an ACK to resynchronize and drop any data.
1784 * But keep on processing for RST or ACK.
1786 tp->t_flags |= TF_ACKNOW;
1788 TCPSTAT_INC(tcps_rcvduppack);
1789 TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
1791 TCPSTAT_INC(tcps_rcvpartduppack);
1792 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
1795 * DSACK - add SACK block for dropped range
1797 if (tp->t_flags & TF_SACK_PERMIT) {
1798 tcp_update_sack_list(tp, th->th_seq,
1799 th->th_seq + todrop);
1801 * ACK now, as the next in-sequence segment
1802 * will clear the DSACK block again
1804 tp->t_flags |= TF_ACKNOW;
1806 * ACK now, as the next in-sequence segment
1807 * will clear the DSACK block again
1809 tp->t_flags |= TF_ACKNOW;
1811 *drop_hdrlen += todrop; /* drop from the top afterwards */
1812 th->th_seq += todrop;
1814 if (th->th_urp > todrop)
1815 th->th_urp -= todrop;
1822 * If segment ends after window, drop trailing data (and PUSH and
1823 * FIN); if nothing left, just ACK.
1825 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
1827 TCPSTAT_INC(tcps_rcvpackafterwin);
1828 if (todrop >= tlen) {
1829 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
1831 * If window is closed can only take segments at
1832 * window edge, and have to drop data and PUSH from
1833 * incoming segments. Continue processing, but
1834 * remember to ack. Otherwise, drop segment and
1837 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
1838 tp->t_flags |= TF_ACKNOW;
1839 TCPSTAT_INC(tcps_rcvwinprobe);
1841 rack_do_dropafterack(m, tp, th, thflags, tlen, ret_val);
1845 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
1848 thflags &= ~(TH_PUSH | TH_FIN);
1855 static struct rack_sendmap *
1856 rack_find_lowest_rsm(struct tcp_rack *rack)
1858 struct rack_sendmap *rsm;
1861 * Walk the time-ordered transmitted list looking for an rsm that is
1862 * not acked. This will be the one that was sent the longest time
1863 * ago that is still outstanding.
1865 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
1866 if (rsm->r_flags & RACK_ACKED) {
1875 static struct rack_sendmap *
1876 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
1878 struct rack_sendmap *prsm;
1881 * Walk the sequence-ordered list backward until we arrive at
1882 * the highest seq not acked. In theory when this is called it
1883 * should be the last segment (which it was not).
1885 counter_u64_add(rack_find_high, 1);
1887 TAILQ_FOREACH_REVERSE_FROM(prsm, &rack->r_ctl.rc_map, rack_head, r_next) {
1888 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
1898 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
1904 * lro is the flag we use to determine if we have seen reordering.
1905 * If it gets set we have seen reordering. The reorder logic
1906 * works in one of two ways:
1908 * If reorder-fade is configured, then we track the last time we saw
1909 * re-ordering occur. If we reach the point where enough time has
1910 * passed we no longer consider reordering to be occurring.
1912 * Or if reorder-fade is 0, then once we see reordering we consider
1913 * the connection to always be subject to reordering and just set lro
1916 * In the end if lro is non-zero we add the extra time for
1921 if (rack->r_ctl.rc_reorder_ts) {
1922 if (rack->r_ctl.rc_reorder_fade) {
1923 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
1924 lro = cts - rack->r_ctl.rc_reorder_ts;
1927 * No time has passed since the last
1928 * reorder, mark it as reordering.
1933 /* Negative time? */
1936 if (lro > rack->r_ctl.rc_reorder_fade) {
1937 /* Turn off reordering seen too */
1938 rack->r_ctl.rc_reorder_ts = 0;
1942 /* Reordering does not fade */
1948 thresh = srtt + rack->r_ctl.rc_pkt_delay;
1950 /* It must be set, if not you get 1/4 rtt */
1951 if (rack->r_ctl.rc_reorder_shift)
1952 thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
1954 thresh += (srtt >> 2);
1958 /* We don't let the rack timeout be above a RTO */
1960 if (thresh > TICKS_2_MSEC(rack->rc_tp->t_rxtcur)) {
1961 thresh = TICKS_2_MSEC(rack->rc_tp->t_rxtcur);
1963 /* And we don't want it above the RTO max either */
1964 if (thresh > rack_rto_max) {
1965 thresh = rack_rto_max;
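/*
 * Editor's sketch (illustrative only, hypothetical helper): the rack
 * threshold computed above reduced to a pure function. With srtt = 40,
 * pkt_delay = 1, reorder_shift = 2 and reordering seen, this yields
 * 40 + 1 + (40 >> 2) = 51 (ms), before the RTO clamps apply.
 */
static __inline uint32_t
rack_thresh_sketch(uint32_t srtt, uint32_t pkt_delay, uint32_t reorder_shift,
    int reordering, uint32_t rto, uint32_t rto_max)
{
	uint32_t thresh;

	thresh = srtt + pkt_delay;
	if (reordering)
		thresh += reorder_shift ? (srtt >> reorder_shift) : (srtt >> 2);
	if (thresh > rto)
		thresh = rto;		/* never above the current RTO */
	if (thresh > rto_max)
		thresh = rto_max;	/* nor the configured RTO ceiling */
	return (thresh);
}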
1971 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
1972 struct rack_sendmap *rsm, uint32_t srtt)
1974 struct rack_sendmap *prsm;
1975 uint32_t thresh, len;
1980 if (rack->r_ctl.rc_tlp_threshold)
1981 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold);
1983 thresh = (srtt * 2);
1985 /* Get the previous sent packet, if any */
1986 maxseg = tcp_maxseg(tp);
1987 counter_u64_add(rack_enter_tlp_calc, 1);
1988 len = rsm->r_end - rsm->r_start;
1989 if (rack->rack_tlp_threshold_use == TLP_USE_ID) {
1990 /* Exactly like the ID */
1991 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= maxseg) {
1992 uint32_t alt_thresh;
1994 * Compensate for delayed-ack with the d-ack time.
1996 counter_u64_add(rack_used_tlpmethod, 1);
1997 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
1998 if (alt_thresh > thresh)
1999 thresh = alt_thresh;
2001 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) {
2003 prsm = TAILQ_PREV(rsm, rack_head, r_tnext);
2004 if (prsm && (len <= maxseg)) {
2006 * Two packets outstanding, thresh should be (2*srtt) +
2007 * possible inter-packet delay (if any).
2009 uint32_t inter_gap = 0;
2012 counter_u64_add(rack_used_tlpmethod, 1);
2013 idx = rsm->r_rtr_cnt - 1;
2014 nidx = prsm->r_rtr_cnt - 1;
2015 if (TSTMP_GEQ(rsm->r_tim_lastsent[nidx], prsm->r_tim_lastsent[idx])) {
2016 /* Yes it was sent later (or at the same time) */
2017 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx];
2019 thresh += inter_gap;
2020 } else if (len <= maxseg) {
2022 * Possibly compensate for delayed-ack.
2024 uint32_t alt_thresh;
2026 counter_u64_add(rack_used_tlpmethod2, 1);
2027 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
2028 if (alt_thresh > thresh)
2029 thresh = alt_thresh;
2031 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) {
2033 if (len <= maxseg) {
2034 uint32_t alt_thresh;
2036 * Compensate for delayed-ack with the d-ack time.
2038 counter_u64_add(rack_used_tlpmethod, 1);
2039 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
2040 if (alt_thresh > thresh)
2041 thresh = alt_thresh;
2044 /* Not above an RTO */
2045 if (thresh > TICKS_2_MSEC(tp->t_rxtcur)) {
2046 thresh = TICKS_2_MSEC(tp->t_rxtcur);
2048 /* Not above a RTO max */
2049 if (thresh > rack_rto_max) {
2050 thresh = rack_rto_max;
2052 /* Apply user supplied min TLP */
2053 if (thresh < rack_tlp_min) {
2054 thresh = rack_tlp_min;
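/*
 * Editor's sketch (illustrative only, hypothetical helper): the TLP
 * threshold starts at srtt + srtt / tlp_threshold (or 2 * srtt when no
 * divisor is configured). In the small-outstanding cases above it is
 * raised to srtt + srtt/2 + rack_delayed_ack_time when that is larger;
 * for brevity this sketch applies that compensation unconditionally.
 * The result is then clamped into [rack_tlp_min, min(t_rxtcur,
 * rack_rto_max)] as shown above.
 */
static __inline uint32_t
tlp_base_thresh_sketch(uint32_t srtt, uint32_t divisor, uint32_t dack_time)
{
	uint32_t thresh, alt_thresh;

	thresh = divisor ? (srtt + (srtt / divisor)) : (srtt * 2);
	alt_thresh = srtt + (srtt / 2) + dack_time;
	return (alt_thresh > thresh ? alt_thresh : thresh);
}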
2059 static struct rack_sendmap *
2060 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused)
2063 * Check to see that we don't need to fall into recovery. We will
2064 * need to do so if our oldest transmit is past the time we should
2067 struct tcp_rack *rack;
2068 struct rack_sendmap *rsm;
2070 uint32_t srtt_cur, srtt, thresh;
2072 rack = (struct tcp_rack *)tp->t_fb_ptr;
2073 if (TAILQ_EMPTY(&rack->r_ctl.rc_map)) {
2076 srtt_cur = tp->t_srtt >> TCP_RTT_SHIFT;
2077 srtt = TICKS_2_MSEC(srtt_cur);
2078 if (rack->rc_rack_rtt && (srtt > rack->rc_rack_rtt))
2079 srtt = rack->rc_rack_rtt;
2081 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
2085 if (rsm->r_flags & RACK_ACKED) {
2086 rsm = rack_find_lowest_rsm(rack);
2090 idx = rsm->r_rtr_cnt - 1;
2091 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
2092 if (tsused < rsm->r_tim_lastsent[idx]) {
2095 if ((tsused - rsm->r_tim_lastsent[idx]) < thresh) {
2098 /* Ok if we reach here we are over-due */
2099 rack->r_ctl.rc_rsm_start = rsm->r_start;
2100 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
2101 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
2102 rack_cong_signal(tp, NULL, CC_NDUPACK);
2107 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack)
2113 t = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT) + ((tp->t_rttvar * 4) >> TCP_RTT_SHIFT));
2114 TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
2115 tcp_persmin, tcp_persmax);
2116 if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
2118 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT;
2119 ret_val = (uint32_t)tt;
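/*
 * Editor's sketch (illustrative only, hypothetical helper): the
 * persist timeout is the classic srtt + 4 * rttvar estimate scaled by
 * the exponential backoff table and clamped. The table and the 5s/60s
 * bounds here are assumed stand-ins for tcp_backoff, tcp_persmin and
 * tcp_persmax, not values taken from this file.
 */
static __inline uint32_t
persist_tmo_sketch(uint32_t srtt_ms, uint32_t rttvar_ms, int shift)
{
	static const int backoff_sketch[] = { 1, 2, 4, 8, 16, 32, 64, 64 };
	uint32_t t, persmin = 5000, persmax = 60000;

	t = (srtt_ms + 4 * rttvar_ms) * backoff_sketch[shift & 7];
	if (t < persmin)
		t = persmin;
	if (t > persmax)
		t = persmax;
	return (t);
}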
2124 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2127 * Start the FR timer, we do this based on getting the first one in
2128 * the rc_tmap. Note that if it is NULL we must stop the timer. In all
2129 * events we need to stop the running timer (if it is running) before
2130 * starting the new one.
2132 uint32_t thresh, exp, to, srtt, time_since_sent;
2135 int32_t is_tlp_timer = 0;
2136 struct rack_sendmap *rsm;
2138 if (rack->t_timers_stopped) {
2139 /* All timers have been stopped, none are to run */
2142 if (rack->rc_in_persist) {
2143 /* We can't start any timer in persists */
2144 return (rack_get_persists_timer_val(tp, rack));
2146 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
2148 /* Nothing on the send map */
2150 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) {
2151 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT;
2152 to = TICKS_2_MSEC(tp->t_rxtcur);
2159 if (rsm->r_flags & RACK_ACKED) {
2160 rsm = rack_find_lowest_rsm(rack);
2166 /* Convert from ms to usecs */
2167 if (rsm->r_flags & RACK_SACK_PASSED) {
2168 if ((tp->t_flags & TF_SENTFIN) &&
2169 ((tp->snd_max - tp->snd_una) == 1) &&
2170 (rsm->r_flags & RACK_HAS_FIN)) {
2172 * We don't start a rack timer if all we have is a
2178 srtt_cur = (tp->t_srtt >> TCP_RTT_SHIFT);
2179 srtt = TICKS_2_MSEC(srtt_cur);
2181 srtt = RACK_INITIAL_RTO;
2183 thresh = rack_calc_thresh_rack(rack, srtt, cts);
2184 idx = rsm->r_rtr_cnt - 1;
2185 exp = rsm->r_tim_lastsent[idx] + thresh;
2186 if (SEQ_GEQ(exp, cts)) {
2188 if (to < rack->r_ctl.rc_min_to) {
2189 to = rack->r_ctl.rc_min_to;
2192 to = rack->r_ctl.rc_min_to;
2195 /* Ok we need to do a TLP, not RACK */
2196 if ((rack->rc_tlp_in_progress != 0) ||
2197 (rack->r_ctl.rc_tlp_rtx_out != 0)) {
2199 * The previous send was a TLP or a tlp_rtx is in
2204 if ((tp->snd_max - tp->snd_una) > tp->snd_wnd) {
2206 * Peer collapsed rwnd, don't do TLP.
2210 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
2212 /* We found no rsm to TLP with. */
2215 if (rsm->r_flags & RACK_HAS_FIN) {
2216 /* If it's a FIN we don't do TLP */
2220 idx = rsm->r_rtr_cnt - 1;
2221 if (TSTMP_GT(cts, rsm->r_tim_lastsent[idx]))
2222 time_since_sent = cts - rsm->r_tim_lastsent[idx];
2224 time_since_sent = 0;
2227 srtt_cur = (tp->t_srtt >> TCP_RTT_SHIFT);
2228 srtt = TICKS_2_MSEC(srtt_cur);
2230 srtt = RACK_INITIAL_RTO;
2231 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt);
2232 if (thresh > time_since_sent)
2233 to = thresh - time_since_sent;
2235 to = rack->r_ctl.rc_min_to;
2236 if (to > TCPTV_REXMTMAX) {
2238 * If the TLP time works out to larger than the max
2239 * RTO lets not do TLP.. just RTO.
2243 if (rsm->r_start != rack->r_ctl.rc_last_tlp_seq) {
2245 * The tail is no longer the last one I did a probe
2248 rack->r_ctl.rc_tlp_seg_send_cnt = 0;
2249 rack->r_ctl.rc_last_tlp_seq = rsm->r_start;
2252 if (is_tlp_timer == 0) {
2253 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK;
2255 if ((rack->r_ctl.rc_tlp_send_cnt > rack_tlp_max_resend) ||
2256 (rack->r_ctl.rc_tlp_seg_send_cnt > rack_tlp_max_resend)) {
2258 * We have exceeded how many times we can retran the
2259 * current TLP timer, switch to the RTO timer.
2263 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP;
2272 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2274 if (rack->rc_in_persist == 0) {
2275 if (((tp->t_flags & TF_SENTFIN) == 0) &&
2276 (tp->snd_max - tp->snd_una) >= sbavail(&rack->rc_inp->inp_socket->so_snd))
2277 /* Must need to send more data to enter persist */
2279 rack->r_ctl.rc_went_idle_time = cts;
2280 rack_timer_cancel(tp, rack, cts, __LINE__);
2282 rack->rc_in_persist = 1;
2287 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack)
2289 if (rack->rc_inp->inp_in_hpts) {
2290 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
2291 rack->r_ctl.rc_hpts_flags = 0;
2293 rack->rc_in_persist = 0;
2294 rack->r_ctl.rc_went_idle_time = 0;
2295 tp->t_flags &= ~TF_FORCEDATA;
2300 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, int32_t line,
2301 int32_t slot, uint32_t tot_len_this_send, int32_t frm_out_sbavail)
2304 uint32_t delayed_ack = 0;
2305 uint32_t hpts_timeout;
2310 if (inp->inp_in_hpts) {
2311 /* A previous call is already set up */
2315 if ((tp->t_state == TCPS_CLOSED) ||
2316 (tp->t_state == TCPS_LISTEN)) {
2319 stopped = rack->rc_tmr_stopped;
2320 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
2321 left = rack->r_ctl.rc_timer_exp - cts;
2323 rack->r_ctl.rc_timer_exp = 0;
2324 if (rack->rc_inp->inp_in_hpts == 0) {
2325 rack->r_ctl.rc_hpts_flags = 0;
2328 /* We are hptsi too */
2329 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
2330 } else if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
2332 * We are still left on the hpts; when the timeout fires
2333 * it will be for output.
2335 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts))
2336 slot = rack->r_ctl.rc_last_output_to - cts;
2340 if ((tp->snd_wnd == 0) && TCPS_HAVEESTABLISHED(tp->t_state)) {
2341 /* No send window.. we must enter persist */
2342 rack_enter_persist(tp, rack, cts);
2343 } else if ((frm_out_sbavail &&
2344 (frm_out_sbavail > (tp->snd_max - tp->snd_una)) &&
2345 (tp->snd_wnd < tp->t_maxseg)) &&
2346 TCPS_HAVEESTABLISHED(tp->t_state)) {
2348 * If we have no window or we can't send a segment (and have
2349 * data to send.. we cheat here and frm_out_sbavail is
2350 * passed in with the sbavail(sb) only from bbr_output) and
2351 * we are established, then we must enter persist (if not
2352 * already in persist).
2354 rack_enter_persist(tp, rack, cts);
2356 hpts_timeout = rack_timer_start(tp, rack, cts);
2357 if (tp->t_flags & TF_DELACK) {
2358 delayed_ack = TICKS_2_MSEC(tcp_delacktime);
2359 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK;
2361 if (delayed_ack && ((hpts_timeout == 0) ||
2362 (delayed_ack < hpts_timeout)))
2363 hpts_timeout = delayed_ack;
2365 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
2367 * If no timers are going to run and we will fall off the hptsi
2368 * wheel, we resort to a keep-alive timer if its configured.
2370 if ((hpts_timeout == 0) &&
2372 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
2373 (tp->t_state <= TCPS_CLOSING)) {
2375 * Ok we have no timer (persists, rack, tlp, rxt or
2376 * del-ack), we don't have segments being paced. So
2377 * all that is left is the keepalive timer.
2379 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
2380 /* Get the established keep-alive time */
2381 hpts_timeout = TP_KEEPIDLE(tp);
2383 /* Get the initial setup keep-alive time */
2384 hpts_timeout = TP_KEEPINIT(tp);
2386 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP;
2389 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) ==
2390 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) {
2392 * RACK, TLP, persists and RXT timers all are restartable
2393 * based on input actions, i.e. we received a packet (ack
2394 * or sack) and that changes things (rw, or snd_una etc).
2395 * Thus we can restart them with a new value. For
2396 * keep-alive, delayed_ack we keep track of what was left
2397 * and restart the timer with a smaller value.
2399 if (left < hpts_timeout)
2400 hpts_timeout = left;
2404 * Hack alert: for now we can't time-out over 2,147,483
2405 * seconds (a bit more than 596 hours), which is probably ok
2408 if (hpts_timeout > 0x7ffffffe)
2409 hpts_timeout = 0x7ffffffe;
2410 rack->r_ctl.rc_timer_exp = cts + hpts_timeout;
2413 rack->r_ctl.rc_last_output_to = cts + slot;
2414 if ((hpts_timeout == 0) || (hpts_timeout > slot)) {
2415 if (rack->rc_inp->inp_in_hpts == 0)
2416 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(slot));
2417 rack_log_to_start(rack, cts, hpts_timeout, slot, 1);
2420 * Arrange for the hpts to kick back in after the
2421 * t-o if the t-o does not cause a send.
2423 if (rack->rc_inp->inp_in_hpts == 0)
2424 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout));
2425 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
2427 } else if (hpts_timeout) {
2428 if (rack->rc_inp->inp_in_hpts == 0)
2429 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout));
2430 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
2432 /* No timer starting */
2434 if (SEQ_GT(tp->snd_max, tp->snd_una)) {
2435 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?",
2436 tp, rack, tot_len_this_send, cts, slot, hpts_timeout);
2440 rack->rc_tmr_stopped = 0;
2442 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, cts);
2446 * RACK Timer, here we simply do logging and housekeeping.
2447 * The normal rack_output() function will call the
2448 * appropriate thing to check if we need to do a RACK retransmit.
2449 * We return 1, saying don't proceed with rack_output, only
2450 * when all timers have been stopped (destroyed PCB?).
2453 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2456 * This timer simply provides an internal trigger to send out data.
2457 * The check_recovery_mode call will see if there are needed
2458 * retransmissions, if so we will enter fast-recovery. The output
2459 * call may or may not do the same thing depending on sysctl
2462 struct rack_sendmap *rsm;
2465 if (tp->t_timers->tt_flags & TT_STOPPED) {
2468 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
2469 /* It's not time yet */
2472 rack_log_to_event(rack, RACK_TO_FRM_RACK);
2473 recovery = IN_RECOVERY(tp->t_flags);
2474 counter_u64_add(rack_to_tot, 1);
2475 if (rack->r_state && (rack->r_state != tp->t_state))
2476 rack_set_state(tp, rack);
2477 rsm = rack_check_recovery_mode(tp, cts);
2481 rtt = rack->rc_rack_rtt;
2484 if ((recovery == 0) &&
2485 (rack->r_ctl.rc_prr_sndcnt < tp->t_maxseg)) {
2487 * The rack-timeout that enters us into recovery
2488 * will force out one MSS and set us up so that we
2489 * can do one more send in 2*rtt (transitioning the
2490 * rack timeout into a rack-tlp).
2492 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
2493 } else if ((rack->r_ctl.rc_prr_sndcnt < tp->t_maxseg) &&
2494 ((rsm->r_end - rsm->r_start) > rack->r_ctl.rc_prr_sndcnt)) {
2496 * When a rack timer goes, we have to send at
2497 * least one segment. They will be paced a min of 1ms
2498 * apart via the next rack timer (or further
2499 * if the rack timer dictates it).
2501 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
2504 /* This is a case that should happen rarely, if ever */
2505 counter_u64_add(rack_tlp_does_nada, 1);
2507 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
2509 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
2511 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK;
2515 static struct rack_sendmap *
2516 rack_merge_rsm(struct tcp_rack *rack,
2517 struct rack_sendmap *l_rsm,
2518 struct rack_sendmap *r_rsm)
2521 * We are merging two ack'd RSM's,
2522 * the l_rsm is on the left (lower seq
2523 * values) and the r_rsm is on the right
2524 * (higher seq value). The simplest way
2525 * to merge these is to move the right
2526 * one into the left. I don't think there
2527 * is any reason we need to try to find
2528 * the oldest (or last oldest retransmitted).
2530 l_rsm->r_end = r_rsm->r_end;
2531 if (r_rsm->r_rtr_bytes)
2532 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes;
2533 if (r_rsm->r_in_tmap) {
2534 /* This really should not happen */
2535 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext);
2538 if (r_rsm->r_flags & RACK_HAS_FIN)
2539 l_rsm->r_flags |= RACK_HAS_FIN;
2540 if (r_rsm->r_flags & RACK_TLP)
2541 l_rsm->r_flags |= RACK_TLP;
2542 TAILQ_REMOVE(&rack->r_ctl.rc_map, r_rsm, r_next);
2543 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) {
2544 /* Transfer the split limit to the map we free */
2545 r_rsm->r_limit_type = l_rsm->r_limit_type;
2546 l_rsm->r_limit_type = 0;
2548 rack_free(rack, r_rsm);
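/*
 * Editor's sketch (illustrative only, hypothetical type and helper):
 * merging the acked blocks [1,6) and [6,11) yields [1,11); retransmit
 * byte counts add, and the sticky flags (FIN, TLP) propagate into the
 * surviving left block exactly as above.
 */
struct merge_sketch {
	uint32_t r_start;
	uint32_t r_end;
	uint32_t r_rtr_bytes;
};

static __inline void
rsm_merge_sketch(struct merge_sketch *l, const struct merge_sketch *r)
{

	l->r_end = r->r_end;			/* absorb the right block */
	l->r_rtr_bytes += r->r_rtr_bytes;	/* retransmit bytes add */
}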
2553 * TLP Timer, here we simply setup what segment we want to
2554 * have the TLP expire on, the normal rack_output() will then
2557 * We return 1, saying don't proceed with rack_output, only
2558 * when all timers have been stopped (destroyed PCB?).
2561 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2566 struct rack_sendmap *rsm = NULL;
2568 uint32_t amm, old_prr_snd = 0;
2569 uint32_t out, avail;
2571 if (tp->t_timers->tt_flags & TT_STOPPED) {
2574 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
2575 /* It's not time yet */
2578 if (rack_progress_timeout_check(tp)) {
2579 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
2583 * A TLP timer has expired. We have been idle for 2 rtts. So we now
2584 * need to figure out how to force a full MSS segment out.
2586 rack_log_to_event(rack, RACK_TO_FRM_TLP);
2587 counter_u64_add(rack_tlp_tot, 1);
2588 if (rack->r_state && (rack->r_state != tp->t_state))
2589 rack_set_state(tp, rack);
2590 so = tp->t_inpcb->inp_socket;
2591 avail = sbavail(&so->so_snd);
2592 out = tp->snd_max - tp->snd_una;
2593 rack->rc_timer_up = 1;
2595 * If we are in recovery we can jazz out a segment if new data is
2596 * present simply by setting rc_prr_sndcnt to a segment.
2598 if ((avail > out) &&
2599 ((rack_always_send_oldest == 0) || (TAILQ_EMPTY(&rack->r_ctl.rc_tmap)))) {
2600 /* New data is available */
2602 if (amm > tp->t_maxseg) {
2604 } else if ((amm < tp->t_maxseg) && ((tp->t_flags & TF_NODELAY) == 0)) {
2605 /* not enough to fill an MTU and no-delay is off */
2608 if (IN_RECOVERY(tp->t_flags)) {
2610 old_prr_snd = rack->r_ctl.rc_prr_sndcnt;
2611 if (out + amm <= tp->snd_wnd)
2612 rack->r_ctl.rc_prr_sndcnt = amm;
2616 /* Set the send-new override */
2617 if (out + amm <= tp->snd_wnd)
2618 rack->r_ctl.rc_tlp_new_data = amm;
2622 rack->r_ctl.rc_tlp_seg_send_cnt = 0;
2623 rack->r_ctl.rc_last_tlp_seq = tp->snd_max;
2624 rack->r_ctl.rc_tlpsend = NULL;
2625 counter_u64_add(rack_tlp_newdata, 1);
2630 * Ok we need to arrange the last un-acked segment to be re-sent, or
2631 * optionally the first un-acked segment.
2633 if (rack_always_send_oldest)
2634 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
2636 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_map, rack_sendmap, r_next);
2637 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) {
2638 rsm = rack_find_high_nonack(rack, rsm);
2642 counter_u64_add(rack_tlp_does_nada, 1);
2644 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
2648 if ((rsm->r_end - rsm->r_start) > tp->t_maxseg) {
2650 * We need to split the last segment in two.
2653 struct rack_sendmap *nrsm;
2655 nrsm = rack_alloc_full_limit(rack);
2658 * No memory to split, we will just exit and punt
2659 * off to the RXT timer.
2661 counter_u64_add(rack_tlp_does_nada, 1);
2664 nrsm->r_start = (rsm->r_end - tp->t_maxseg);
2665 nrsm->r_end = rsm->r_end;
2666 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
2667 nrsm->r_flags = rsm->r_flags;
2668 nrsm->r_sndcnt = rsm->r_sndcnt;
2669 nrsm->r_rtr_bytes = 0;
2670 rsm->r_end = nrsm->r_start;
2671 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
2672 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
2674 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next);
2675 if (rsm->r_in_tmap) {
2676 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
2677 nrsm->r_in_tmap = 1;
2679 rsm->r_flags &= (~RACK_HAS_FIN);
2682 rack->r_ctl.rc_tlpsend = rsm;
2683 rack->r_ctl.rc_tlp_rtx_out = 1;
2684 if (rsm->r_start == rack->r_ctl.rc_last_tlp_seq) {
2685 rack->r_ctl.rc_tlp_seg_send_cnt++;
2688 rack->r_ctl.rc_last_tlp_seq = rsm->r_start;
2689 rack->r_ctl.rc_tlp_seg_send_cnt = 1;
2692 rack->r_ctl.rc_tlp_send_cnt++;
2693 if (rack->r_ctl.rc_tlp_send_cnt > rack_tlp_max_resend) {
2695 * Can't [re]transmit a segment the peer has not acknowledged
2696 * after the max number of tries. We need the retransmit timer to take
2700 rack->r_ctl.rc_tlpsend = NULL;
2702 rsm->r_flags &= ~RACK_TLP;
2703 rack->r_ctl.rc_prr_sndcnt = old_prr_snd;
2704 counter_u64_add(rack_tlp_retran_fail, 1);
2707 rsm->r_flags |= RACK_TLP;
2709 if (rsm && (rsm->r_start == rack->r_ctl.rc_last_tlp_seq) &&
2710 (rack->r_ctl.rc_tlp_seg_send_cnt > rack_tlp_max_resend)) {
2712 * We don't want to send a single segment more than the max
2717 rack->r_timer_override = 1;
2718 rack->r_tlp_running = 1;
2719 rack->rc_tlp_in_progress = 1;
2720 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
2723 rack->rc_timer_up = 0;
2724 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
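/*
 * Editor's sketch (illustrative only, hypothetical helper): a TLP
 * probe resends at most one maxseg from the tail of the send map,
 * which is why a larger tail block is split above so that only its
 * final maxseg bytes go out.
 */
static __inline uint32_t
tlp_probe_start_sketch(uint32_t r_start, uint32_t r_end, uint32_t maxseg)
{

	if ((r_end - r_start) > maxseg)
		return (r_end - maxseg);	/* probe only the tail MSS */
	return (r_start);
}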
2729 * Delayed ack Timer, here we simply need to set up the
2730 * ACK_NOW flag and remove the DELACK flag. From there
2731 * the output routine will send the ack out.
2733 * We only return 1, saying don't proceed, if all timers
2734 * are stopped (destroyed PCB?).
2737 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2739 if (tp->t_timers->tt_flags & TT_STOPPED) {
2742 rack_log_to_event(rack, RACK_TO_FRM_DELACK);
2743 tp->t_flags &= ~TF_DELACK;
2744 tp->t_flags |= TF_ACKNOW;
2745 TCPSTAT_INC(tcps_delack);
2746 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
2751 * Persists timer, here we simply need to set up the
2752 * FORCE-DATA flag; the output routine will send
2753 * the one byte probe.
2755 * We only return 1, saying don't proceed, if all timers
2756 * are stopped (destroyed PCB?).
2759 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2766 if (tp->t_timers->tt_flags & TT_STOPPED) {
2769 if (rack->rc_in_persist == 0)
2771 if (rack_progress_timeout_check(tp)) {
2772 tcp_set_inp_to_drop(inp, ETIMEDOUT);
2775 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
2777 * Persistence timer into zero window. Force a byte to be output, if
2780 TCPSTAT_INC(tcps_persisttimeo);
2782 * Hack: if the peer is dead/unreachable, we do not time out if the
2783 * window is closed. After a full backoff, drop the connection if
2784 * the idle time (no responses to probes) reaches the maximum
2785 * backoff that we would use if retransmitting.
2787 if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
2788 (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
2789 ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
2790 TCPSTAT_INC(tcps_persistdrop);
2792 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
2795 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) &&
2796 tp->snd_una == tp->snd_max)
2797 rack_exit_persist(tp, rack);
2798 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT;
2800 * If the user has closed the socket then drop a persisting
2801 * connection after a much reduced timeout.
2803 if (tp->t_state > TCPS_CLOSE_WAIT &&
2804 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
2806 TCPSTAT_INC(tcps_persistdrop);
2807 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
2810 tp->t_flags |= TF_FORCEDATA;
2812 rack_log_to_event(rack, RACK_TO_FRM_PERSIST);
2817 * If a keepalive goes off, we had no other timers
2818 * happening. We always return 1 here since this
2819 * routine either drops the connection or sends
2820 * out a segment to force a response.
2823 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2825 struct tcptemp *t_template;
2828 if (tp->t_timers->tt_flags & TT_STOPPED) {
2831 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
2833 rack_log_to_event(rack, RACK_TO_FRM_KEEP);
2835 * Keep-alive timer went off; send something or drop connection if
2836 * idle for too long.
2838 TCPSTAT_INC(tcps_keeptimeo);
2839 if (tp->t_state < TCPS_ESTABLISHED)
2841 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
2842 tp->t_state <= TCPS_CLOSING) {
2843 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
2846 * Send a packet designed to force a response if the peer is
2847 * up and reachable: either an ACK if the connection is
2848 * still alive, or an RST if the peer has closed the
2849 * connection due to timeout or reboot. Using sequence
2850 * number tp->snd_una-1 causes the transmitted zero-length
2851 * segment to lie outside the receive window; by the
2852 * protocol spec, this requires the correspondent TCP to
2855 TCPSTAT_INC(tcps_keepprobe);
2856 t_template = tcpip_maketemplate(inp);
2858 tcp_respond(tp, t_template->tt_ipgen,
2859 &t_template->tt_t, (struct mbuf *)NULL,
2860 tp->rcv_nxt, tp->snd_una - 1, 0);
2861 free(t_template, M_TEMP);
2864 rack_start_hpts_timer(rack, tp, cts, __LINE__, 0, 0, 0);
2867 TCPSTAT_INC(tcps_keepdrops);
2868 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
2873 * Retransmit helper function, clear up all the ack
2874 * flags and take care of important bookkeeping.
2877 rack_remxt_tmr(struct tcpcb *tp)
2880 * The retransmit timer went off, all sack'd blocks must be
2883 struct rack_sendmap *rsm, *trsm = NULL;
2884 struct tcp_rack *rack;
2887 rack = (struct tcp_rack *)tp->t_fb_ptr;
2888 rack_timer_cancel(tp, rack, tcp_ts_getticks(), __LINE__);
2889 rack_log_to_event(rack, RACK_TO_FRM_TMR);
2890 if (rack->r_state && (rack->r_state != tp->t_state))
2891 rack_set_state(tp, rack);
2893 * Ideally we would like to be able to
2894 * mark SACK-PASS on anything not acked here.
2895 * However, if we do that we would burst out
2896 * all that data 1ms apart. This would be unwise,
2897 * so for now we will just let the normal rxt timer
2898 * and tlp timer take care of it.
2900 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_map, r_next) {
2901 if (rsm->r_flags & RACK_ACKED) {
2904 if (rsm->r_in_tmap == 0) {
2905 /* We must re-add it to the tlist */
2907 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
2909 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
2915 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS);
2917 /* Clear the count (we just un-acked them) */
2918 rack->r_ctl.rc_sacked = 0;
2919 /* Clear the tlp rtx mark */
2920 rack->r_ctl.rc_tlp_rtx_out = 0;
2921 rack->r_ctl.rc_tlp_seg_send_cnt = 0;
2922 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_map);
2923 /* Setup so we send one segment */
2924 if (rack->r_ctl.rc_prr_sndcnt < tp->t_maxseg)
2925 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
2926 rack->r_timer_override = 1;
2930 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
2931 * we will setup to retransmit the lowest seq number outstanding.
2934 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2942 if (tp->t_timers->tt_flags & TT_STOPPED) {
2945 if (rack_progress_timeout_check(tp)) {
2946 tcp_set_inp_to_drop(inp, ETIMEDOUT);
2949 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT;
2950 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
2951 (tp->snd_una == tp->snd_max)) {
2952 /* Nothing outstanding .. nothing to do */
2956 * Retransmission timer went off. Message has not been acked within
2957 * retransmit interval. Back off to a longer retransmit interval
2958 * and retransmit one segment.
2960 if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
2961 tp->t_rxtshift = TCP_MAXRXTSHIFT;
2962 TCPSTAT_INC(tcps_timeoutdrop);
2964 tcp_set_inp_to_drop(rack->rc_inp,
2965 (tp->t_softerror ? (uint16_t) tp->t_softerror : ETIMEDOUT));
2969 if (tp->t_state == TCPS_SYN_SENT) {
2971 * If the SYN was retransmitted, indicate CWND to be limited
2972 * to 1 segment in cc_conn_init().
2975 } else if (tp->t_rxtshift == 1) {
2977 * first retransmit; record ssthresh and cwnd so they can be
2978 * recovered if this turns out to be a "bad" retransmit. A
2979 * retransmit is considered "bad" if an ACK for this segment
2980 * is received within RTT/2 interval; the assumption here is
2981 * that the ACK was already in flight. See "On Estimating
2982 * End-to-End Network Path Properties" by Allman and Paxson
2985 tp->snd_cwnd_prev = tp->snd_cwnd;
2986 tp->snd_ssthresh_prev = tp->snd_ssthresh;
2987 tp->snd_recover_prev = tp->snd_recover;
2988 if (IN_FASTRECOVERY(tp->t_flags))
2989 tp->t_flags |= TF_WASFRECOVERY;
2991 tp->t_flags &= ~TF_WASFRECOVERY;
2992 if (IN_CONGRECOVERY(tp->t_flags))
2993 tp->t_flags |= TF_WASCRECOVERY;
2995 tp->t_flags &= ~TF_WASCRECOVERY;
2996 tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
2997 tp->t_flags |= TF_PREVVALID;
2999 tp->t_flags &= ~TF_PREVVALID;
3000 TCPSTAT_INC(tcps_rexmttimeo);
3001 if ((tp->t_state == TCPS_SYN_SENT) ||
3002 (tp->t_state == TCPS_SYN_RECEIVED))
3003 rexmt = MSEC_2_TICKS(RACK_INITIAL_RTO * tcp_syn_backoff[tp->t_rxtshift]);
3005 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
3006 TCPT_RANGESET(tp->t_rxtcur, rexmt,
3007 max(MSEC_2_TICKS(rack_rto_min), rexmt),
3008 MSEC_2_TICKS(rack_rto_max));
3010 * We enter the path for PLMTUD if connection is established or, if
3011 * connection is FIN_WAIT_1 status, reason for the last is that if
3012 * amount of data we send is very small, we could send it in couple
3013 * of packets and process straight to FIN. In that case we won't
3014 * catch ESTABLISHED state.
3017 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false;
3021 if (((V_tcp_pmtud_blackhole_detect == 1) ||
3022 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) ||
3023 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) &&
3024 ((tp->t_state == TCPS_ESTABLISHED) ||
3025 (tp->t_state == TCPS_FIN_WAIT_1))) {
3028 * Idea here is that at each stage of mtu probe (usually,
3029 * 1448 -> 1188 -> 524) should be given 2 chances to recover
3030 * before further clamping down. 'tp->t_rxtshift % 2 == 0'
3031 * should take care of that.
3033 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) ==
3034 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) &&
3035 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
3036 tp->t_rxtshift % 2 == 0)) {
3038 * Enter Path MTU Black-hole Detection mechanism: -
3039 * Disable Path MTU Discovery (IP "DF" bit). -
3040 * Reduce MTU to lower value than what we negotiated
3043 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
3044 /* Record that we may have found a black hole. */
3045 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
3046 /* Keep track of previous MSS. */
3047 tp->t_pmtud_saved_maxseg = tp->t_maxseg;
3051 * Reduce the MSS to blackhole value or to the
3052 * default in an attempt to retransmit.
3056 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
3057 /* Use the sysctl tuneable blackhole MSS. */
3058 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
3059 TCPSTAT_INC(tcps_pmtud_blackhole_activated);
3060 } else if (isipv6) {
3061 /* Use the default MSS. */
3062 tp->t_maxseg = V_tcp_v6mssdflt;
3064 * Disable Path MTU Discovery when we switch
3067 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
3068 TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
3071 #if defined(INET6) && defined(INET)
3075 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
3076 /* Use the sysctl tuneable blackhole MSS. */
3077 tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
3078 TCPSTAT_INC(tcps_pmtud_blackhole_activated);
3080 /* Use the default MSS. */
3081 tp->t_maxseg = V_tcp_mssdflt;
3083 * Disable Path MTU Discovery when we switch
3086 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
3087 TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
3092 * If further retransmissions are still unsuccessful
3093 * with a lowered MTU, maybe this isn't a blackhole
3094 * and we restore the previous MSS and blackhole
3095 * detection flags. The limit '6' is determined by
3096 * giving each probe stage (1448, 1188, 524) 2
3097 * chances to recover.
3099 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
3100 (tp->t_rxtshift >= 6)) {
3101 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
3102 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
3103 tp->t_maxseg = tp->t_pmtud_saved_maxseg;
3104 TCPSTAT_INC(tcps_pmtud_blackhole_failed);
3109 * Disable RFC1323 and SACK if we haven't got any response to our
3110 * third SYN to work-around some broken terminal servers (most of
3111 * which have hopefully been retired) that have bad VJ header
3112 * compression code which trashes TCP segments containing
3113 * unknown-to-them TCP options.
3115 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
3116 (tp->t_rxtshift == 3))
3117 tp->t_flags &= ~(TF_REQ_SCALE | TF_REQ_TSTMP | TF_SACK_PERMIT);
3119 * If we backed off this far, our srtt estimate is probably bogus.
3120 * Clobber it so we'll take the next rtt measurement as our srtt;
3121 * move the current srtt into rttvar to keep the current retransmit
3124 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
3126 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
3127 in6_losing(tp->t_inpcb);
3130 in_losing(tp->t_inpcb);
3131 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
3134 if (rack_use_sack_filter)
3135 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
3136 tp->snd_recover = tp->snd_max;
3137 tp->t_flags |= TF_ACKNOW;
3139 rack_cong_signal(tp, NULL, CC_RTO);
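/*
 * Editor's sketch (illustrative only, hypothetical helper): the PLMTUD
 * blackhole probe staging used in the RXT handler above. Even values
 * of t_rxtshift in [2,6) step the MSS down one stage (negotiated ->
 * blackhole MSS -> minimum), and reaching shift 6 concludes it was not
 * a blackhole and restores the saved MSS.
 */
static __inline uint32_t
blackhole_mss_sketch(int rxtshift, uint32_t cur_mss, uint32_t bh_mss,
    uint32_t min_mss, uint32_t saved_mss)
{

	if (rxtshift >= 6)
		return (saved_mss);	/* give up; restore previous MSS */
	if ((rxtshift >= 2) && ((rxtshift % 2) == 0))
		return ((cur_mss > bh_mss) ? bh_mss : min_mss);
	return (cur_mss);		/* between stages: leave MSS alone */
}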
3145 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling)
3148 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK);
3153 if (tp->t_state == TCPS_LISTEN) {
3154 /* no timers on listen sockets */
3155 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)
3159 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
3162 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
3164 rack_log_to_processing(rack, cts, ret, 0);
3167 if (hpts_calling == 0) {
3169 rack_log_to_processing(rack, cts, ret, 0);
3173 * Ok our timer went off early and we are not paced -- a false
3174 * alarm, go back to sleep.
3177 left = rack->r_ctl.rc_timer_exp - cts;
3178 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left));
3179 rack_log_to_processing(rack, cts, ret, left);
3180 rack->rc_last_pto_set = 0;
3183 rack->rc_tmr_stopped = 0;
3184 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK;
3185 if (timers & PACE_TMR_DELACK) {
3186 ret = rack_timeout_delack(tp, rack, cts);
3187 } else if (timers & PACE_TMR_RACK) {
3188 ret = rack_timeout_rack(tp, rack, cts);
3189 } else if (timers & PACE_TMR_TLP) {
3190 ret = rack_timeout_tlp(tp, rack, cts);
3191 } else if (timers & PACE_TMR_RXT) {
3192 ret = rack_timeout_rxt(tp, rack, cts);
3193 } else if (timers & PACE_TMR_PERSIT) {
3194 ret = rack_timeout_persist(tp, rack, cts);
3195 } else if (timers & PACE_TMR_KEEP) {
3196 ret = rack_timeout_keepalive(tp, rack, cts);
3198 rack_log_to_processing(rack, cts, ret, timers);
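/*
 * Editor's sketch (illustrative only, hypothetical helper): exactly
 * one timer class is serviced per call, in the fixed priority order
 * of the if/else chain above.
 */
static __inline const char *
which_timer_sketch(int32_t timers)
{

	if (timers & PACE_TMR_DELACK)
		return ("delack");
	if (timers & PACE_TMR_RACK)
		return ("rack");
	if (timers & PACE_TMR_TLP)
		return ("tlp");
	if (timers & PACE_TMR_RXT)
		return ("rxt");
	if (timers & PACE_TMR_PERSIT)
		return ("persist");
	if (timers & PACE_TMR_KEEP)
		return ("keep");
	return ("none");
}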
3203 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line)
3205 uint8_t hpts_removed = 0;
3207 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
3208 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) {
3209 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
3212 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
3213 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
3214 if (rack->rc_inp->inp_in_hpts &&
3215 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) {
3217 * Canceling timers when we have no output being
3218 * paced. We also must remove ourselves from the
3221 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
3224 rack_log_to_cancel(rack, hpts_removed, line);
3225 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK);
3230 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type)
3236 rack_stopall(struct tcpcb *tp)
3238 struct tcp_rack *rack;
3239 rack = (struct tcp_rack *)tp->t_fb_ptr;
3240 rack->t_timers_stopped = 1;
3245 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta)
3251 rack_timer_active(struct tcpcb *tp, uint32_t timer_type)
3257 rack_stop_all_timers(struct tcpcb *tp)
3259 struct tcp_rack *rack;
3262 * Assure no timers are running.
3264 if (tcp_timer_active(tp, TT_PERSIST)) {
3265 /* We enter in persists, set the flag appropriately */
3266 rack = (struct tcp_rack *)tp->t_fb_ptr;
3267 rack->rc_in_persist = 1;
3269 tcp_timer_suspend(tp, TT_PERSIST);
3270 tcp_timer_suspend(tp, TT_REXMT);
3271 tcp_timer_suspend(tp, TT_KEEP);
3272 tcp_timer_suspend(tp, TT_DELACK);
3276 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
3277 struct rack_sendmap *rsm, uint32_t ts)
3283 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
3284 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
3285 rsm->r_flags |= RACK_OVERMAX;
3287 if ((rsm->r_rtr_cnt > 1) && (rack->r_tlp_running == 0)) {
3288 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
3289 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
3291 idx = rsm->r_rtr_cnt - 1;
3292 rsm->r_tim_lastsent[idx] = ts;
3293 if (rsm->r_flags & RACK_ACKED) {
3294 /* Probably MTU discovery messing with us */
3295 rsm->r_flags &= ~RACK_ACKED;
3296 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
3298 if (rsm->r_in_tmap) {
3299 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
3301 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
3303 if (rsm->r_flags & RACK_SACK_PASSED) {
3304 /* We have retransmitted due to the SACK pass */
3305 rsm->r_flags &= ~RACK_SACK_PASSED;
3306 rsm->r_flags |= RACK_WAS_SACKPASS;
3308 /* Update memory for next rtr */
3309 rack->r_ctl.rc_next = TAILQ_NEXT(rsm, r_next);
3314 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
3315 struct rack_sendmap *rsm, uint32_t ts, int32_t * lenp)
3318 * We (re-)transmitted starting at rsm->r_start for some length
3319 * (possibly less than r_end).
3321 struct rack_sendmap *nrsm;
3327 c_end = rsm->r_start + len;
3328 if (SEQ_GEQ(c_end, rsm->r_end)) {
3330 * We retransmitted the whole piece, or more than the
3331 * whole piece, slopping into the next rsm.
3333 rack_update_rsm(tp, rack, rsm, ts);
3334 if (c_end == rsm->r_end) {
3340 /* Hangs over the end; return what's left */
3341 act_len = rsm->r_end - rsm->r_start;
3342 *lenp = (len - act_len);
3343 return (rsm->r_end);
3345 /* We don't get out of this block. */
3348 * Here we retransmitted less than the whole thing which means we
3349 * have to split this into what was transmitted and what was not.
3351 nrsm = rack_alloc_full_limit(rack);
3354 * We can't get memory, so lets not proceed.
3360 * So here we are going to take the original rsm and make it what we
3361 * retransmitted. nrsm will be the tail portion we did not
3362 * retransmit. For example say the chunk was 1, 11 (10 bytes). And
3363 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to
3364 * 1, 6 and the new piece will be 6, 11.
3366 nrsm->r_start = c_end;
3367 nrsm->r_end = rsm->r_end;
3368 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
3369 nrsm->r_flags = rsm->r_flags;
3370 nrsm->r_sndcnt = rsm->r_sndcnt;
3371 nrsm->r_rtr_bytes = 0;
3373 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
3374 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
3376 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next);
3377 if (rsm->r_in_tmap) {
3378 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
3379 nrsm->r_in_tmap = 1;
3381 rsm->r_flags &= (~RACK_HAS_FIN);
3382 rack_update_rsm(tp, rack, rsm, ts);
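/*
 * Editor's sketch (illustrative only, hypothetical type and helper) of
 * the split walked through above: retransmitting 5 bytes of the chunk
 * [1,11) shrinks the original to [1,6) and creates a trailing entry
 * [6,11).
 */
struct split_sketch {
	uint32_t r_start;
	uint32_t r_end;
};

static __inline void
rsm_split_sketch(struct split_sketch *orig, struct split_sketch *tail,
    uint32_t c_end)
{

	tail->r_start = c_end;		/* e.g. 6 */
	tail->r_end = orig->r_end;	/* e.g. 11 */
	orig->r_end = c_end;		/* original becomes [1,6) */
}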
3389 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
3390 uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts,
3391 uint8_t pass, struct rack_sendmap *hintrsm)
3393 struct tcp_rack *rack;
3394 struct rack_sendmap *rsm, *nrsm;
3395 register uint32_t snd_max, snd_una;
3399 * Add to the RACK log of packets in flight or retransmitted. If
3400 * there is a TS option we will use the TS echoed, if not we will
3403 * Retransmissions will increment the count and move the ts to its
3404 * proper place. Note that if options do not include TS's then we
3405 * won't be able to effectively use the ACK for an RTT on a retran.
3407 * Notes about r_start and r_end. Let's consider a send starting at
3408 * sequence 1 for 10 bytes. In such an example the r_start would be
3409 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
3410 * This means that r_end is actually the first sequence for the next
3415 * If err is set what do we do XXXrrs? should we not add the thing?
3416 * -- i.e. return if err != 0 or should we pretend we sent it? --
3417 * i.e. proceed with add ** do this for now.
3419 INP_WLOCK_ASSERT(tp->t_inpcb);
3422 * We don't log errors -- we could but snd_max does not
3423 * advance in this case either.
3427 if (th_flags & TH_RST) {
3429 * We don't log resets and we return immediately from
3434 rack = (struct tcp_rack *)tp->t_fb_ptr;
3435 snd_una = tp->snd_una;
3436 if (SEQ_LEQ((seq_out + len), snd_una)) {
3437 /* Are we sending an old segment to induce an ack (keep-alive)? */
3440 if (SEQ_LT(seq_out, snd_una)) {
3441 /* huh? should we panic? */
3444 end = seq_out + len;
3446 len = end - seq_out;
3448 snd_max = tp->snd_max;
3449 if (th_flags & (TH_SYN | TH_FIN)) {
3451 * The call to rack_log_output is made before bumping
3452 * snd_max. This means we can record one extra byte on a SYN
3453 * or FIN if seq_out is adding more on and a FIN is present
3454 * (and we are not resending).
3456 if (th_flags & TH_SYN)
3458 if (th_flags & TH_FIN)
3460 if (SEQ_LT(snd_max, tp->snd_nxt)) {
3462 * The add/update has not been done for the FIN/SYN
3465 snd_max = tp->snd_nxt;
3469 /* We don't log zero window probes */
3472 rack->r_ctl.rc_time_last_sent = ts;
3473 if (IN_RECOVERY(tp->t_flags)) {
3474 rack->r_ctl.rc_prr_out += len;
3476 /* First question is it a retransmission? */
3477 if (seq_out == snd_max) {
3479 rsm = rack_alloc(rack);
3482 * Hmm out of memory and the tcb got destroyed while
3487 if (th_flags & TH_FIN) {
3488 rsm->r_flags = RACK_HAS_FIN;
3492 rsm->r_tim_lastsent[0] = ts;
3494 rsm->r_rtr_bytes = 0;
3495 rsm->r_start = seq_out;
3496 rsm->r_end = rsm->r_start + len;
3498 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_map, rsm, r_next);
3499 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
3504 * If we reach here it's a retransmission and we need to find it.
3507 if (hintrsm && (hintrsm->r_start == seq_out)) {
3510 } else if (rack->r_ctl.rc_next) {
3511 /* We have a hint from a previous run */
3512 rsm = rack->r_ctl.rc_next;
3514 /* No hints, sorry */
3517 if ((rsm) && (rsm->r_start == seq_out)) {
3519 * We used rc_next or hintrsm to retransmit, hopefully the
3522 seq_out = rack_update_entry(tp, rack, rsm, ts, &len);
3529 /* Ok it was not the last pointer go through it the hard way. */
3530 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_map, r_next) {
3531 if (rsm->r_start == seq_out) {
3532 seq_out = rack_update_entry(tp, rack, rsm, ts, &len);
3533 rack->r_ctl.rc_next = TAILQ_NEXT(rsm, r_next);
3540 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) {
3541 /* Transmitted within this piece */
3543 * Ok we must split off the front and then let the
3544 * update do the rest
3546 nrsm = rack_alloc_full_limit(rack);
3548 rack_update_rsm(tp, rack, rsm, ts);
3552 * copy rsm to nrsm and then trim the front of rsm
3553 * to not include this part.
3555 nrsm->r_start = seq_out;
3556 nrsm->r_end = rsm->r_end;
3557 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
3558 nrsm->r_flags = rsm->r_flags;
3559 nrsm->r_sndcnt = rsm->r_sndcnt;
3560 nrsm->r_rtr_bytes = 0;
3561 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
3562 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
3564 rsm->r_end = nrsm->r_start;
3565 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next);
3566 if (rsm->r_in_tmap) {
3567 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
3568 nrsm->r_in_tmap = 1;
3570 rsm->r_flags &= (~RACK_HAS_FIN);
3571 seq_out = rack_update_entry(tp, rack, nrsm, ts, &len);
3578 * Hmm not found in map did they retransmit both old and on into the
3581 if (seq_out == tp->snd_max) {
3583 } else if (SEQ_LT(seq_out, tp->snd_max)) {
3585 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n",
3586 seq_out, len, tp->snd_una, tp->snd_max);
3587 printf("Starting Dump of all rack entries\n");
3588 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_map, r_next) {
3589 printf("rsm:%p start:%u end:%u\n",
3590 rsm, rsm->r_start, rsm->r_end);
3592 printf("Dump complete\n");
3593 panic("seq_out not found rack:%p tp:%p",
3599 * Hmm beyond sndmax? (only if we are using the new rtt-pack
3602 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p",
3603 seq_out, len, tp->snd_max, tp);
3609 * Record one of the RTT updates from an ack into
3610 * our sample structure.
3613 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt)
3615 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
3616 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
3617 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
3619 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
3620 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
3621 rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
3623 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
3624 rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
3625 rack->r_ctl.rack_rs.rs_rtt_cnt++;
3629 * Collect new round-trip time estimate
3630 * and update averages and current timeout.
3633 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp)
3636 uint32_t o_srtt, o_var;
3639 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY)
3640 /* No valid sample */
3642 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) {
3643 /* We are to use the lowest RTT seen in a single ack */
3644 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
3645 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) {
3646 /* We are to use the highest RTT seen in a single ack */
3647 rtt = rack->r_ctl.rack_rs.rs_rtt_highest;
3648 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) {
3649 /* We are to use the average RTT seen in a single ack */
3650 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot /
3651 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt);
3654 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method);
3660 rack_log_rtt_sample(rack, rtt);
3661 o_srtt = tp->t_srtt;
3662 o_var = tp->t_rttvar;
3663 rack = (struct tcp_rack *)tp->t_fb_ptr;
3664 if (tp->t_srtt != 0) {
3666 * srtt is stored as fixed point with 5 bits after the
3667 * binary point (i.e., scaled by 8). The following magic is
3668 * equivalent to the smoothing algorithm in rfc793 with an
3669 * alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed point).
3670 * Adjust rtt to origin 0.
3672 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
3673 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
3675 tp->t_srtt += delta;
3676 if (tp->t_srtt <= 0)
3680 * We accumulate a smoothed rtt variance (actually, a
3681 * smoothed mean difference), then set the retransmit timer
3682 * to smoothed rtt + 4 times the smoothed variance. rttvar
3683 * is stored as fixed point with 4 bits after the binary
3684 * point (scaled by 16). The following is equivalent to
3685 * rfc793 smoothing with an alpha of .75 (rttvar =
3686 * rttvar*3/4 + |delta| / 4). This replaces rfc793's
3691 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
3692 tp->t_rttvar += delta;
3693 if (tp->t_rttvar <= 0)
3695 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
3696 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3699 * No rtt measurement yet - use the unsmoothed rtt. Set the
3700 * variance to half the rtt (so our first retransmit happens
3703 tp->t_srtt = rtt << TCP_RTT_SHIFT;
3704 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
3705 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3707 TCPSTAT_INC(tcps_rttupdated);
3708 rack_log_rtt_upd(tp, rack, rtt, o_srtt, o_var);
3710 #ifdef NETFLIX_STATS
3711 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt));
3716 * the retransmit should happen at rtt + 4 * rttvar. Because of the
3717 * way we do the smoothing, srtt and rttvar will each average +1/2
3718 * tick of bias. When we compute the retransmit timer, we want 1/2
3719 * tick of rounding and 1 extra tick because of +-1/2 tick
3720 * uncertainty in the firing of the timer. The bias will give us
3721 * exactly the 1.5 tick we need. But, because the bias is
3722 * statistical, we have to test that we don't drop below the minimum
3723 * feasible timer (which is 2 ticks).
3725 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
3726 max(MSEC_2_TICKS(rack_rto_min), rtt + 2), MSEC_2_TICKS(rack_rto_max));
3727 tp->t_softerror = 0;
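/*
 * Editor's sketch (illustrative only, hypothetical helper): the
 * fixed-point smoothing above with the stock shift constants
 * (TCP_RTT_SHIFT = 5, TCP_RTTVAR_SHIFT = 4, TCP_DELTA_SHIFT = 2),
 * i.e. srtt kept scaled by 32 with alpha 7/8 and rttvar kept scaled
 * by 16 with beta 3/4.
 */
static __inline void
srtt_smooth_sketch(int32_t *srtt, int32_t *rttvar, int32_t rtt)
{
	int32_t delta;

	delta = ((rtt - 1) << 2) - (*srtt >> 3);
	*srtt += delta;			/* srtt = 7/8 srtt + 1/8 rtt */
	if (*srtt <= 0)
		*srtt = 1;
	if (delta < 0)
		delta = -delta;
	delta -= (*rttvar >> 2);
	*rttvar += delta;		/* rttvar = 3/4 var + 1/4 |delta| */
	if (*rttvar <= 0)
		*rttvar = 1;
}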
3731 rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm,
3732 uint32_t t, uint32_t cts)
3735 * For this RSM, we acknowledged the data from a previous
3736 * transmission, not the last one we made. This means we did a false
3739 struct tcp_rack *rack;
3741 if (rsm->r_flags & RACK_HAS_FIN) {
3743 * The FIN is often sent multiple times when we
3744 * have everything outstanding ack'd. We ignore this case
3745 * since it's over now.
3749 if (rsm->r_flags & RACK_TLP) {
3751 * We expect TLPs to have this occur.
3755 rack = (struct tcp_rack *)tp->t_fb_ptr;
3756 /* should we undo cc changes and exit recovery? */
3757 if (IN_RECOVERY(tp->t_flags)) {
3758 if (rack->r_ctl.rc_rsm_start == rsm->r_start) {
3760 * Undo what we ratcheted down and exit recovery if
3763 EXIT_RECOVERY(tp->t_flags);
3764 tp->snd_recover = tp->snd_una;
3765 if (rack->r_ctl.rc_cwnd_at > tp->snd_cwnd)
3766 tp->snd_cwnd = rack->r_ctl.rc_cwnd_at;
3767 if (rack->r_ctl.rc_ssthresh_at > tp->snd_ssthresh)
3768 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at;
3771 if (rsm->r_flags & RACK_WAS_SACKPASS) {
3773 * We retransmitted based on a sack and the earlier
3774 * retransmission ack'd it - re-ordering is occurring.
3776 counter_u64_add(rack_reorder_seen, 1);
3777 rack->r_ctl.rc_reorder_ts = cts;
3779 counter_u64_add(rack_badfr, 1);
3780 counter_u64_add(rack_badfr_bytes, (rsm->r_end - rsm->r_start));
3785 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
3786 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type)
3791 if (rsm->r_flags & RACK_ACKED)
3796 if ((rsm->r_rtr_cnt == 1) ||
3797 ((ack_type == CUM_ACKED) &&
3798 (to->to_flags & TOF_TS) &&
3800 (rsm->r_tim_lastsent[rsm->r_rtr_cnt - 1] == to->to_tsecr))
3803 * We will only find a matching timestamp if it's cum-acked.
3804 * But if it's only one retransmission it's for-sure matching
3807 t = cts - rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
3810 if (!tp->t_rttlow || tp->t_rttlow > t)
3812 if (!rack->r_ctl.rc_rack_min_rtt ||
3813 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
3814 rack->r_ctl.rc_rack_min_rtt = t;
3815 if (rack->r_ctl.rc_rack_min_rtt == 0) {
3816 rack->r_ctl.rc_rack_min_rtt = 1;
3819 tcp_rack_xmit_timer(rack, TCP_TS_TO_TICKS(t) + 1);
3820 if ((rsm->r_flags & RACK_TLP) &&
3821 (!IN_RECOVERY(tp->t_flags))) {
3822 /* Segment was a TLP and our retrans matched */
3823 if (rack->r_ctl.rc_tlp_cwnd_reduce) {
3824 rack->r_ctl.rc_rsm_start = tp->snd_max;
3825 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
3826 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
3827 rack_cong_signal(tp, NULL, CC_NDUPACK);
3829 * When we enter recovery we need to assure
3830 * we send one packet.
3832 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
3834 rack->r_ctl.rc_tlp_rtx_out = 0;
3836 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
3837 /* New more recent rack_tmit_time */
3838 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
3839 rack->rc_rack_rtt = t;
3844 * We clear the soft/rxtshift since we got an ack.
3845 * There is no assurance we will call the commit() function
3846 * so we need to clear these to avoid incorrect handling.
3849 tp->t_softerror = 0;
3850 if ((to->to_flags & TOF_TS) &&
3851 (ack_type == CUM_ACKED) &&
3853 ((rsm->r_flags & (RACK_DEFERRED | RACK_OVERMAX)) == 0)) {
3855 * Now which timestamp does it match? In this block the ACK
3856 * must be coming from a previous transmission.
3858 for (i = 0; i < rsm->r_rtr_cnt; i++) {
3859 if (rsm->r_tim_lastsent[i] == to->to_tsecr) {
3860 t = cts - rsm->r_tim_lastsent[i];
3863 if ((i + 1) < rsm->r_rtr_cnt) {
3865 rack_earlier_retran(tp, rsm, t, cts);
3867 if (!tp->t_rttlow || tp->t_rttlow > t)
3869 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
3870 rack->r_ctl.rc_rack_min_rtt = t;
3871 if (rack->r_ctl.rc_rack_min_rtt == 0) {
3872 rack->r_ctl.rc_rack_min_rtt = 1;
3876 * Note the following calls to
3877 * tcp_rack_xmit_timer() are being commented
3878 * out for now. They give us no more accuracy
3879 * and often lead to a wrong choice. We have
3880 * enough samples that have not been
3881 * retransmitted. I leave the commented out
3882 * code in here in case in the future we
3883 * decide to add it back (though I can't foresee
3884 * doing that). That way we will easily see
3885 * where they need to be placed.
3887 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
3888 rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
3889 /* New more recent rack_tmit_time */
3890 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
3891 rack->rc_rack_rtt = t;
3899	 * Ok, it's a SACK block that we retransmitted, or a Windows
3900	 * machine without timestamps. We can tell nothing from the
3901	 * timestamp since it's not there, nor from the time the peer last
3902	 * received a segment that moved its cum-ack point forward.
3905 i = rsm->r_rtr_cnt - 1;
3906 t = cts - rsm->r_tim_lastsent[i];
3909 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
3911 * We retransmitted and the ack came back in less
3912 * than the smallest rtt we have observed. We most
3913	 * likely did an improper retransmit as outlined in
3914 * 4.2 Step 3 point 2 in the rack-draft.
3916 i = rsm->r_rtr_cnt - 2;
3917 t = cts - rsm->r_tim_lastsent[i];
3918 rack_earlier_retran(tp, rsm, t, cts);
3919 } else if (rack->r_ctl.rc_rack_min_rtt) {
3921 * We retransmitted it and the retransmit did the
3924 if (!rack->r_ctl.rc_rack_min_rtt ||
3925 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
3926 rack->r_ctl.rc_rack_min_rtt = t;
3927 if (rack->r_ctl.rc_rack_min_rtt == 0) {
3928 rack->r_ctl.rc_rack_min_rtt = 1;
3931 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[i])) {
3932 /* New more recent rack_tmit_time */
3933 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[i];
3934 rack->rc_rack_rtt = t;
3943	 * Mark the SACK_PASSED flag on all entries prior to rsm, send wise.
3946 rack_log_sack_passed(struct tcpcb *tp,
3947 struct tcp_rack *rack, struct rack_sendmap *rsm)
3949 struct rack_sendmap *nrsm;
3953 idx = rsm->r_rtr_cnt - 1;
3954 ts = rsm->r_tim_lastsent[idx];
3956 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap,
3957 rack_head, r_tnext) {
3959		/* Skip the original segment; it is acked */
3962 if (nrsm->r_flags & RACK_ACKED) {
3963 /* Skip ack'd segments */
3966 if (nrsm->r_flags & RACK_SACK_PASSED) {
3968 * We found one that is already marked
3969 * passed, we have been here before and
3970 * so all others below this are marked.
3974 idx = nrsm->r_rtr_cnt - 1;
3975 if (ts == nrsm->r_tim_lastsent[idx]) {
3977			 * For this case let's use the seq no; if we sent in a
3978 * big block (TSO) we would have a bunch of segments
3979 * sent at the same time.
3981 * We would only get a report if its SEQ is earlier.
3982 * If we have done multiple retransmits the times
3983 * would not be equal.
3985 if (SEQ_LT(nrsm->r_start, rsm->r_start)) {
3986 nrsm->r_flags |= RACK_SACK_PASSED;
3987 nrsm->r_flags &= ~RACK_WAS_SACKPASS;
3991 * Here they were sent at different times, not a big
3992 * block. Since we transmitted this one later and
3993 * see it sack'd then this must also be missing (or
3994 * we would have gotten a sack block for it)
3996 nrsm->r_flags |= RACK_SACK_PASSED;
3997 nrsm->r_flags &= ~RACK_WAS_SACKPASS;
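	/*
	 * Illustrative walk of the two cases above (hypothetical sequence
	 * numbers): suppose rsm [3000,4460) was just SACKed. Scanning
	 * backwards in send order:
	 *  - nrsm [1540,3000) sent at the same timestamp (one TSO burst)
	 *    is marked SACK_PASSED only because its sequence is earlier;
	 *  - nrsm [100,1540) sent at an earlier timestamp is marked
	 *    unconditionally, making it a candidate for a RACK-based
	 *    retransmit.
	 */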
4003 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack,
4004 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts)
4008 uint32_t start, end, changed = 0;
4009 struct rack_sendmap *rsm, *nrsm;
4010 int32_t used_ref = 1;
4012 start = sack->start;
4015 if (rsm && SEQ_LT(start, rsm->r_start)) {
4016 TAILQ_FOREACH_REVERSE_FROM(rsm, &rack->r_ctl.rc_map, rack_head, r_next) {
4017 if (SEQ_GEQ(start, rsm->r_start) &&
4018 SEQ_LT(start, rsm->r_end)) {
4028	/* First let's locate the block where this guy is */
4029 TAILQ_FOREACH_FROM(rsm, &rack->r_ctl.rc_map, r_next) {
4030 if (SEQ_GEQ(start, rsm->r_start) &&
4031 SEQ_LT(start, rsm->r_end)) {
4038 * This happens when we get duplicate sack blocks with the
4039	 * same end. For example, SACK 4: 100, SACK 3: 100. The sort
4040	 * will not change their location so we would just start at
4041 * the end of the first one and get lost.
4043 if (tp->t_flags & TF_SENTFIN) {
4045 * Check to see if we have not logged the FIN that
4048 nrsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_map, rack_sendmap, r_next);
4049 if (nrsm && (nrsm->r_end + 1) == tp->snd_max) {
4051 * Ok we did not get the FIN logged.
4060 panic("tp:%p rack:%p sack:%p to:%p prsm:%p",
4061 tp, rack, sack, to, prsm);
4067 counter_u64_add(rack_sack_proc_restart, 1);
4068 goto start_at_beginning;
4070 /* Ok we have an ACK for some piece of rsm */
4071 if (rsm->r_start != start) {
4073		 * Need to split this into two pieces: the before and after.
4075 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
4078			 * failed XXXrrs what can we do but lose the sack
4083 nrsm->r_start = start;
4084 nrsm->r_rtr_bytes = 0;
4085 nrsm->r_end = rsm->r_end;
4086 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
4087 nrsm->r_flags = rsm->r_flags;
4088 nrsm->r_sndcnt = rsm->r_sndcnt;
4089 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
4090 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
4092 rsm->r_end = nrsm->r_start;
4093 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next);
4094 if (rsm->r_in_tmap) {
4095 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
4096 nrsm->r_in_tmap = 1;
4098 rsm->r_flags &= (~RACK_HAS_FIN);
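	/*
	 * Hypothetical split example: if rsm covers [100,300) and the
	 * SACK block starts at 200, the code above trims rsm to
	 * [100,200) and creates nrsm for [200,300), copying the
	 * retransmit history (r_rtr_cnt, r_tim_lastsent[]) so RTT and
	 * loss accounting stay correct for both halves.
	 */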
4101 if (SEQ_GEQ(end, rsm->r_end)) {
4103 * The end of this block is either beyond this guy or right
4107 if ((rsm->r_flags & RACK_ACKED) == 0) {
4108 rack_update_rtt(tp, rack, rsm, to, cts, SACKED);
4109 changed += (rsm->r_end - rsm->r_start);
4110 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
4111 rack_log_sack_passed(tp, rack, rsm);
4112			/* Is reordering occurring? */
4113 if (rsm->r_flags & RACK_SACK_PASSED) {
4114 counter_u64_add(rack_reorder_seen, 1);
4115 rack->r_ctl.rc_reorder_ts = cts;
4117 rsm->r_flags |= RACK_ACKED;
4118 rsm->r_flags &= ~RACK_TLP;
4119 if (rsm->r_in_tmap) {
4120 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
4124 if (end == rsm->r_end) {
4125 /* This block only - done */
4128		/* There is more not covered by this rsm, move on */
4130 nrsm = TAILQ_NEXT(rsm, r_next);
4135 /* Ok we need to split off this one at the tail */
4136 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
4138		/* failed rrs what can we do but lose the sack info? */
4142 nrsm->r_start = end;
4143 nrsm->r_end = rsm->r_end;
4144 nrsm->r_rtr_bytes = 0;
4145 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
4146 nrsm->r_flags = rsm->r_flags;
4147 nrsm->r_sndcnt = rsm->r_sndcnt;
4148 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
4149 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
4151 /* The sack block does not cover this guy fully */
4152 rsm->r_flags &= (~RACK_HAS_FIN);
4154 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next);
4155 if (rsm->r_in_tmap) {
4156 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
4157 nrsm->r_in_tmap = 1;
4159 if (rsm->r_flags & RACK_ACKED) {
4160 /* Been here done that */
4163 rack_update_rtt(tp, rack, rsm, to, cts, SACKED);
4164 changed += (rsm->r_end - rsm->r_start);
4165 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
4166 rack_log_sack_passed(tp, rack, rsm);
4167	/* Is reordering occurring? */
4168 if (rsm->r_flags & RACK_SACK_PASSED) {
4169 counter_u64_add(rack_reorder_seen, 1);
4170 rack->r_ctl.rc_reorder_ts = cts;
4172 rsm->r_flags |= RACK_ACKED;
4173 rsm->r_flags &= ~RACK_TLP;
4174 if (rsm->r_in_tmap) {
4175 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
4179 if (rsm && (rsm->r_flags & RACK_ACKED)) {
4181 * Now can we merge this newly acked
4182 * block with either the previous or
4185 nrsm = TAILQ_NEXT(rsm, r_next);
4187 (nrsm->r_flags & RACK_ACKED)) {
4188 /* yep this and next can be merged */
4189 rsm = rack_merge_rsm(rack, rsm, nrsm);
4191 /* Now what about the previous? */
4192 nrsm = TAILQ_PREV(rsm, rack_head, r_next);
4194 (nrsm->r_flags & RACK_ACKED)) {
4195 /* yep the previous and this can be merged */
4196 rsm = rack_merge_rsm(rack, nrsm, rsm);
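	/*
	 * Merge sketch (hypothetical ranges): if [100,200) was already
	 * ACKED and this pass just ACKED [200,300), the two entries are
	 * collapsed into a single [100,300) map entry, keeping the
	 * scoreboard compact for later SACK passes.
	 */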
4199 if (used_ref == 0) {
4200 counter_u64_add(rack_sack_proc_all, 1);
4202 counter_u64_add(rack_sack_proc_short, 1);
4204 /* Save off where we last were */
4206 rack->r_ctl.rc_sacklast = TAILQ_NEXT(rsm, r_next);
4208 rack->r_ctl.rc_sacklast = NULL;
4214 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack)
4216 struct rack_sendmap *tmap;
4219 while (rsm && (rsm->r_flags & RACK_ACKED)) {
4220		/* It's no longer sacked, mark it so */
4221 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
4223 if (rsm->r_in_tmap) {
4224 panic("rack:%p rsm:%p flags:0x%x in tmap?",
4225 rack, rsm, rsm->r_flags);
4228 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS);
4229 /* Rebuild it into our tmap */
4231 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
4234 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext);
4237 tmap->r_in_tmap = 1;
4238 rsm = TAILQ_NEXT(rsm, r_next);
4241	 * Now let's possibly clear the sack filter so we start
4242 * recognizing sacks that cover this area.
4244 if (rack_use_sack_filter)
4245 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack);
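	/*
	 * Reneging sketch (illustrative): the peer once SACKed [200,300)
	 * but now cum-acks only to 200, i.e. exactly rsm->r_start. That
	 * is only consistent with the receiver having discarded the
	 * SACKed data, so the loop above strips RACK_ACKED, re-threads
	 * the entries onto the transmit map in send order, and the sack
	 * filter is cleared so fresh SACKs for the range are honored.
	 */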
4250 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th)
4252 uint32_t changed, last_seq, entered_recovery = 0;
4253 struct tcp_rack *rack;
4254 struct rack_sendmap *rsm;
4255 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1];
4256 register uint32_t th_ack;
4257 int32_t i, j, k, num_sack_blks = 0;
4258 uint32_t cts, acked, ack_point, sack_changed = 0;
4260 INP_WLOCK_ASSERT(tp->t_inpcb);
4261 if (th->th_flags & TH_RST) {
4262 /* We don't log resets */
4265 rack = (struct tcp_rack *)tp->t_fb_ptr;
4266 cts = tcp_ts_getticks();
4267 rsm = TAILQ_FIRST(&rack->r_ctl.rc_map);
4269 th_ack = th->th_ack;
4271 if (SEQ_GT(th_ack, tp->snd_una)) {
4272 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__);
4273 tp->t_acktime = ticks;
4275 if (rsm && SEQ_GT(th_ack, rsm->r_start))
4276 changed = th_ack - rsm->r_start;
4279 * The ACK point is advancing to th_ack, we must drop off
4280		 * the packets in the rack log and calculate any eligible
4283 rack->r_wanted_output++;
4285 rsm = TAILQ_FIRST(&rack->r_ctl.rc_map);
4287 if ((th_ack - 1) == tp->iss) {
4289 * For the SYN incoming case we will not
4290 * have called tcp_output for the sending of
4291 * the SYN, so there will be no map. All
4292 * other cases should probably be a panic.
4296 if (tp->t_flags & TF_SENTFIN) {
4297			/* if we sent a FIN we will not have a map */
4301 panic("No rack map tp:%p for th:%p state:%d rack:%p snd_una:%u snd_max:%u snd_nxt:%u chg:%d\n",
4303 th, tp->t_state, rack,
4304 tp->snd_una, tp->snd_max, tp->snd_nxt, changed);
4308 if (SEQ_LT(th_ack, rsm->r_start)) {
4309 /* Huh map is missing this */
4311 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n",
4313 th_ack, tp->t_state, rack->r_state);
4317 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED);
4318 /* Now do we consume the whole thing? */
4319 if (SEQ_GEQ(th_ack, rsm->r_end)) {
4320 /* Its all consumed. */
4323 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
4324 rsm->r_rtr_bytes = 0;
4325 TAILQ_REMOVE(&rack->r_ctl.rc_map, rsm, r_next);
4326 if (rsm->r_in_tmap) {
4327 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
4330 if (rack->r_ctl.rc_next == rsm) {
4331 /* scoot along the marker */
4332 rack->r_ctl.rc_next = TAILQ_FIRST(&rack->r_ctl.rc_map);
4334 if (rsm->r_flags & RACK_ACKED) {
4336 * It was acked on the scoreboard -- remove
4339 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
4340 } else if (rsm->r_flags & RACK_SACK_PASSED) {
4342			 * There are segments ACKED on the
4343 * scoreboard further up. We are seeing
4346 counter_u64_add(rack_reorder_seen, 1);
4347 rsm->r_flags |= RACK_ACKED;
4348 rack->r_ctl.rc_reorder_ts = cts;
4350 left = th_ack - rsm->r_end;
4351 if (rsm->r_rtr_cnt > 1) {
4353 * Technically we should make r_rtr_cnt be
4354			 * monotonically increasing and just mod it to
4355			 * the timestamp it is replacing; that way
4356 * we would have the last 3 retransmits. Now
4357 * rc_loss_count will be wrong if we
4358 * retransmit something more than 2 times in
4361 rack->r_ctl.rc_loss_count += (rsm->r_rtr_cnt - 1);
4363 /* Free back to zone */
4364 rack_free(rack, rsm);
4370 if (rsm->r_flags & RACK_ACKED) {
4372 * It was acked on the scoreboard -- remove it from
4373 * total for the part being cum-acked.
4375 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
4377 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
4378 rsm->r_rtr_bytes = 0;
4379 rsm->r_start = th_ack;
4382 /* Check for reneging */
4383 rsm = TAILQ_FIRST(&rack->r_ctl.rc_map);
4384 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) {
4386 * The peer has moved snd_una up to
4387 * the edge of this send, i.e. one
4388 * that it had previously acked. The only
4389		 * way that can be true is if the peer threw
4390 * away data (space issues) that it had
4391 * previously sacked (else it would have
4392		 * given us snd_una up to rsm->r_end).
4393 * We need to undo the acked markings here.
4395 * Note we have to look to make sure th_ack is
4396 * our rsm->r_start in case we get an old ack
4397 * where th_ack is behind snd_una.
4399 rack_peer_reneges(rack, rsm, th->th_ack);
4401 if ((to->to_flags & TOF_SACK) == 0) {
4402		/* We are done, nothing left to log */
4405 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_map, rack_sendmap, r_next);
4407 last_seq = rsm->r_end;
4409 last_seq = tp->snd_max;
4411 /* Sack block processing */
4412 if (SEQ_GT(th_ack, tp->snd_una))
4415 ack_point = tp->snd_una;
4416 for (i = 0; i < to->to_nsacks; i++) {
4417 bcopy((to->to_sacks + i * TCPOLEN_SACK),
4418 &sack, sizeof(sack));
4419 sack.start = ntohl(sack.start);
4420 sack.end = ntohl(sack.end);
4421 if (SEQ_GT(sack.end, sack.start) &&
4422 SEQ_GT(sack.start, ack_point) &&
4423 SEQ_LT(sack.start, tp->snd_max) &&
4424 SEQ_GT(sack.end, ack_point) &&
4425 SEQ_LEQ(sack.end, tp->snd_max)) {
4426 if ((rack->r_ctl.rc_num_maps_alloced > rack_sack_block_limit) &&
4427 (SEQ_LT(sack.end, last_seq)) &&
4428 ((sack.end - sack.start) < (tp->t_maxseg / 8))) {
4430				 * Not the last piece and it's smaller than
4431				 * 1/8th of an MSS. We ignore this.
4433 counter_u64_add(rack_runt_sacks, 1);
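				/*
				 * Illustrative arithmetic: with
				 * t_maxseg == 1460 the runt threshold is
				 * 1460 / 8 == 182 bytes, so a 100-byte
				 * block that is not the last piece is
				 * dropped here instead of costing a map
				 * allocation.
				 */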
4436 sack_blocks[num_sack_blks] = sack;
4438 } else if (SEQ_LEQ(sack.start, th_ack) &&
4439 SEQ_LEQ(sack.end, th_ack)) {
4441			 * It's a D-SACK block.
4443 /* tcp_record_dsack(sack.start, sack.end); */
4446 if (num_sack_blks == 0)
4449 * Sort the SACK blocks so we can update the rack scoreboard with
4452 if (rack_use_sack_filter) {
4453 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks,
4454 num_sack_blks, th->th_ack);
4455 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks);
4457 if (num_sack_blks < 2) {
4460 /* Sort the sacks */
4461 for (i = 0; i < num_sack_blks; i++) {
4462 for (j = i + 1; j < num_sack_blks; j++) {
4463 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
4464 sack = sack_blocks[i];
4465 sack_blocks[i] = sack_blocks[j];
4466 sack_blocks[j] = sack;
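	/*
	 * This is a simple O(n^2) exchange sort keyed on block end; it
	 * is cheap because num_sack_blks is at most TCP_MAX_SACK + 1
	 * entries. E.g. ends {900, 300, 600} become {300, 600, 900}
	 * (illustrative values).
	 */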
4471 * Now are any of the sack block ends the same (yes some
4472	 * implementations send these)?
4475 if (num_sack_blks > 1) {
4476 for (i = 0; i < num_sack_blks; i++) {
4477 for (j = i + 1; j < num_sack_blks; j++) {
4478 if (sack_blocks[i].end == sack_blocks[j].end) {
4480				 * Ok, these two have the same end; we
4481				 * want the smallest end and then
4482				 * throw away the larger and start
4485 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) {
4487					 * The second block covers
4488					 * more area, use that
4490 sack_blocks[i].start = sack_blocks[j].start;
4493 * Now collapse out the dup-sack and
4496 for (k = (j + 1); k < num_sack_blks; k++) {
4497 sack_blocks[j].start = sack_blocks[k].start;
4498 sack_blocks[j].end = sack_blocks[k].end;
4508 rsm = rack->r_ctl.rc_sacklast;
4509 for (i = 0; i < num_sack_blks; i++) {
4510 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts);
4512 rack->r_wanted_output++;
4514 sack_changed += acked;
4519		/* Something changed, cancel the rack timer */
4520 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4522 if ((sack_changed) && (!IN_RECOVERY(tp->t_flags))) {
4524		 * Ok we have a high probability that we need to go into
4525 * recovery since we have data sack'd
4527 struct rack_sendmap *rsm;
4530 tsused = tcp_ts_getticks();
4531 rsm = tcp_rack_output(tp, rack, tsused);
4533 /* Enter recovery */
4534 rack->r_ctl.rc_rsm_start = rsm->r_start;
4535 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
4536 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
4537 entered_recovery = 1;
4538 rack_cong_signal(tp, NULL, CC_NDUPACK);
4540 * When we enter recovery we need to assure we send
4543 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
4544 rack->r_timer_override = 1;
4547 if (IN_RECOVERY(tp->t_flags) && (entered_recovery == 0)) {
4548		/* Deal with changed and PRR here (in recovery only) */
4549 uint32_t pipe, snd_una;
4551 rack->r_ctl.rc_prr_delivered += changed;
4552 /* Compute prr_sndcnt */
4553 if (SEQ_GT(tp->snd_una, th_ack)) {
4554 snd_una = tp->snd_una;
4558 pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt;
4559 if (pipe > tp->snd_ssthresh) {
4562 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh;
4563 if (rack->r_ctl.rc_prr_recovery_fs > 0)
4564 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs;
4566 rack->r_ctl.rc_prr_sndcnt = 0;
4570 if (sndcnt > (long)rack->r_ctl.rc_prr_out)
4571 sndcnt -= rack->r_ctl.rc_prr_out;
4574 rack->r_ctl.rc_prr_sndcnt = sndcnt;
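			/*
			 * The computation above follows the Proportional
			 * Rate Reduction idea (cf. RFC 6937): roughly
			 *   sndcnt = (prr_delivered * ssthresh)
			 *            / rc_prr_recovery_fs - prr_out;
			 * With hypothetical numbers ssthresh = 50000,
			 * rc_prr_recovery_fs = 100000, 10000 bytes newly
			 * delivered and nothing yet sent in recovery,
			 * 10000 * 50000 / 100000 = 5000 bytes may go out,
			 * i.e. sending is scaled down in proportion to
			 * ssthresh/FlightSize.
			 */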
4578 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out)
4579 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out);
4582 if (changed > limit)
4584 limit += tp->t_maxseg;
4585 if (tp->snd_ssthresh > pipe) {
4586 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit);
4588 rack->r_ctl.rc_prr_sndcnt = min(0, limit);
4591 if (rack->r_ctl.rc_prr_sndcnt >= tp->t_maxseg) {
4592 rack->r_timer_override = 1;
4598 * Return value of 1, we do not need to call rack_process_data().
4599 * return value of 0, rack_process_data can be called.
4600 * For ret_val if its 0 the TCP is locked, if its non-zero
4601 * its unlocked and probably unsafe to touch the TCB.
4604 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
4605 struct tcpcb *tp, struct tcpopt *to,
4606 uint32_t tiwin, int32_t tlen,
4607 int32_t * ofia, int32_t thflags, int32_t * ret_val)
4609 int32_t ourfinisacked = 0;
4610 int32_t nsegs, acked_amount;
4613 struct tcp_rack *rack;
4614 int32_t recovery = 0;
4616 rack = (struct tcp_rack *)tp->t_fb_ptr;
4617 if (SEQ_GT(th->th_ack, tp->snd_max)) {
4618 rack_do_dropafterack(m, tp, th, thflags, tlen, ret_val);
4621 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
4622 rack_log_ack(tp, to, th);
4624 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
4626 * Old ack, behind (or duplicate to) the last one rcv'd
4627		 * Note: we should mark that reordering is occurring! We should also
4628		 * look for sack blocks arriving e.g. ack 1, 4-4 then ack 1,
4629		 * 3-3, 4-4 would be reordering. As well as ack 1, 3-3 <no
4635 * If we reach this point, ACK is not a duplicate, i.e., it ACKs
4636 * something we sent.
4638 if (tp->t_flags & TF_NEEDSYN) {
4640 * T/TCP: Connection was half-synchronized, and our SYN has
4641 * been ACK'd (so connection is now fully synchronized). Go
4642 * to non-starred state, increment snd_una for ACK of SYN,
4643 * and check if we can do window scaling.
4645 tp->t_flags &= ~TF_NEEDSYN;
4647 /* Do window scaling? */
4648 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
4649 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
4650 tp->rcv_scale = tp->request_r_scale;
4651 /* Send window already scaled. */
4654 nsegs = max(1, m->m_pkthdr.lro_nsegs);
4655 INP_WLOCK_ASSERT(tp->t_inpcb);
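	/*
	 * BYTES_THIS_ACK() is simply (th->th_ack - tp->snd_una): the
	 * number of bytes newly acknowledged by this segment.
	 */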
4657 acked = BYTES_THIS_ACK(tp, th);
4658 TCPSTAT_ADD(tcps_rcvackpack, nsegs);
4659 TCPSTAT_ADD(tcps_rcvackbyte, acked);
4662 * If we just performed our first retransmit, and the ACK arrives
4663 * within our recovery window, then it was a mistake to do the
4664 * retransmit in the first place. Recover our original cwnd and
4665 * ssthresh, and proceed to transmit where we left off.
4667 if (tp->t_flags & TF_PREVVALID) {
4668 tp->t_flags &= ~TF_PREVVALID;
4669 if (tp->t_rxtshift == 1 &&
4670 (int)(ticks - tp->t_badrxtwin) < 0)
4671 rack_cong_signal(tp, th, CC_RTO_ERR);
4674 * If we have a timestamp reply, update smoothed round trip time. If
4675 * no timestamp is present but transmit timer is running and timed
4676 * sequence number was acked, update smoothed round trip time. Since
4677 * we now have an rtt measurement, cancel the timer backoff (cf.,
4678 * Phil Karn's retransmit alg.). Recompute the initial retransmit
4681 * Some boxes send broken timestamp replies during the SYN+ACK
4682 * phase, ignore timestamps of 0 or we could calculate a huge RTT
4683 * and blow up the retransmit timer.
4686 * If all outstanding data is acked, stop retransmit timer and
4687 * remember to restart (more output or persist). If there is more
4688 * data to be acked, restart retransmit timer, using current
4689 * (possibly backed-off) value.
4691 if (th->th_ack == tp->snd_max) {
4692 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4693 rack->r_wanted_output++;
4696 * If no data (only SYN) was ACK'd, skip rest of ACK processing.
4700 *ofia = ourfinisacked;
4703 if (rack->r_ctl.rc_early_recovery) {
4704 if (IN_RECOVERY(tp->t_flags)) {
4705 if (SEQ_LT(th->th_ack, tp->snd_recover) &&
4706 (SEQ_LT(th->th_ack, tp->snd_max))) {
4707 tcp_rack_partialack(tp, th);
4709 rack_post_recovery(tp, th);
4715 * Let the congestion control algorithm update congestion control
4716 * related information. This typically means increasing the
4717 * congestion window.
4719 rack_ack_received(tp, rack, th, nsegs, CC_ACK, recovery);
4720 SOCKBUF_LOCK(&so->so_snd);
4721 acked_amount = min(acked, (int)sbavail(&so->so_snd));
4722 tp->snd_wnd -= acked_amount;
4723 mfree = sbcut_locked(&so->so_snd, acked_amount);
4724 if ((sbused(&so->so_snd) == 0) &&
4725 (acked > acked_amount) &&
4726 (tp->t_state >= TCPS_FIN_WAIT_1)) {
4729 /* NB: sowwakeup_locked() does an implicit unlock. */
4730 sowwakeup_locked(so);
4732 if (rack->r_ctl.rc_early_recovery == 0) {
4733 if (IN_RECOVERY(tp->t_flags)) {
4734 if (SEQ_LT(th->th_ack, tp->snd_recover) &&
4735 (SEQ_LT(th->th_ack, tp->snd_max))) {
4736 tcp_rack_partialack(tp, th);
4738 rack_post_recovery(tp, th);
4742 tp->snd_una = th->th_ack;
4743 if (SEQ_GT(tp->snd_una, tp->snd_recover))
4744 tp->snd_recover = tp->snd_una;
4746 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
4747 tp->snd_nxt = tp->snd_una;
4749 if (tp->snd_una == tp->snd_max) {
4750 /* Nothing left outstanding */
4751 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
4753 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4754 /* Set need output so persist might get set */
4755 rack->r_wanted_output++;
4756 if (rack_use_sack_filter)
4757 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
4758 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
4759 (sbavail(&so->so_snd) == 0) &&
4760 (tp->t_flags2 & TF2_DROP_AF_DATA)) {
4762 * The socket was gone and the
4763 * peer sent data, time to
4768 rack_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
4773 *ofia = ourfinisacked;
4779 * Return value of 1, the TCB is unlocked and most
4780 * likely gone, return value of 0, the TCP is still
4784 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
4785 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
4786 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
4789 * Update window information. Don't look at window if no ACK: TAC's
4790 * send garbage on first SYN.
4796 #define tfo_syn (FALSE)
4798 struct tcp_rack *rack;
4800 rack = (struct tcp_rack *)tp->t_fb_ptr;
4801 INP_WLOCK_ASSERT(tp->t_inpcb);
4802 nsegs = max(1, m->m_pkthdr.lro_nsegs);
4803 if ((thflags & TH_ACK) &&
4804 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
4805 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
4806 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
4807 /* keep track of pure window updates */
4809 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
4810 TCPSTAT_INC(tcps_rcvwinupd);
4811 tp->snd_wnd = tiwin;
4812 tp->snd_wl1 = th->th_seq;
4813 tp->snd_wl2 = th->th_ack;
4814 if (tp->snd_wnd > tp->max_sndwnd)
4815 tp->max_sndwnd = tp->snd_wnd;
4816 rack->r_wanted_output++;
4817 } else if (thflags & TH_ACK) {
4818 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) {
4819 tp->snd_wnd = tiwin;
4820 tp->snd_wl1 = th->th_seq;
4821 tp->snd_wl2 = th->th_ack;
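	/*
	 * Recap of the window-update rule above (informal): the window
	 * is taken when the segment is strictly newer (snd_wl1 < th_seq),
	 * acks more data at the same seq (snd_wl2 < th_ack), or matches
	 * both but advertises a larger window; a shrink is only accepted
	 * in the else-branch when snd_wl2 == th_ack. snd_wl1/snd_wl2
	 * record the seq/ack that last updated the window.
	 */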
4824 /* Was persist timer active and now we have window space? */
4825 if ((rack->rc_in_persist != 0) && tp->snd_wnd) {
4826 rack_exit_persist(tp, rack);
4827 tp->snd_nxt = tp->snd_max;
4828 /* Make sure we output to start the timer */
4829 rack->r_wanted_output++;
4831 if (tp->t_flags2 & TF2_DROP_AF_DATA) {
4836 * Process segments with URG.
4838 if ((thflags & TH_URG) && th->th_urp &&
4839 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
4841 * This is a kludge, but if we receive and accept random
4842 * urgent pointers, we'll crash in soreceive. It's hard to
4843 * imagine someone actually wanting to send this much urgent
4846 SOCKBUF_LOCK(&so->so_rcv);
4847 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) {
4848 th->th_urp = 0; /* XXX */
4849 thflags &= ~TH_URG; /* XXX */
4850 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
4851 goto dodata; /* XXX */
4854 * If this segment advances the known urgent pointer, then
4855 * mark the data stream. This should not happen in
4856 * CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since a
4857 * FIN has been received from the remote side. In these
4858 * states we ignore the URG.
4860 * According to RFC961 (Assigned Protocols), the urgent
4861 * pointer points to the last octet of urgent data. We
4862 * continue, however, to consider it to indicate the first
4863 * octet of data past the urgent section as the original
4864 * spec states (in one of two places).
4866 if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) {
4867 tp->rcv_up = th->th_seq + th->th_urp;
4868 so->so_oobmark = sbavail(&so->so_rcv) +
4869 (tp->rcv_up - tp->rcv_nxt) - 1;
4870 if (so->so_oobmark == 0)
4871 so->so_rcv.sb_state |= SBS_RCVATMARK;
4873 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
4875 SOCKBUF_UNLOCK(&so->so_rcv);
4877 * Remove out of band data so doesn't get presented to user.
4878 * This can happen independent of advancing the URG pointer,
4879 * but if two URG's are pending at once, some out-of-band
4880 * data may creep in... ick.
4882 if (th->th_urp <= (uint32_t) tlen &&
4883 !(so->so_options & SO_OOBINLINE)) {
4884 /* hdr drop is delayed */
4885 tcp_pulloutofband(so, th, m, drop_hdrlen);
4889 * If no out of band data is expected, pull receive urgent
4890 * pointer along with the receive window.
4892 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
4893 tp->rcv_up = tp->rcv_nxt;
4896 INP_WLOCK_ASSERT(tp->t_inpcb);
4899 * Process the segment text, merging it into the TCP sequencing
4900 * queue, and arranging for acknowledgment of receipt if necessary.
4901 * This process logically involves adjusting tp->rcv_wnd as data is
4902 * presented to the user (this happens in tcp_usrreq.c, case
4903 * PRU_RCVD). If a FIN has already been received on this connection
4904 * then we just ignore the text.
4907 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
4908 (tp->t_flags & TF_FASTOPEN));
4910 if ((tlen || (thflags & TH_FIN) || tfo_syn) &&
4911 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
4912 tcp_seq save_start = th->th_seq;
4913 tcp_seq save_rnxt = tp->rcv_nxt;
4914 int save_tlen = tlen;
4916 m_adj(m, drop_hdrlen); /* delayed header drop */
4918 * Insert segment which includes th into TCP reassembly
4919 * queue with control block tp. Set thflags to whether
4920 * reassembly now includes a segment with FIN. This handles
4921 * the common case inline (segment is the next to be
4922 * received on an established connection, and the queue is
4923 * empty), avoiding linkage into and removal from the queue
4924 * and repetition of various conversions. Set DELACK for
4925 * segments received in order, but ack immediately when
4926 * segments are out of order (so fast retransmit can work).
4928 if (th->th_seq == tp->rcv_nxt &&
4930 (TCPS_HAVEESTABLISHED(tp->t_state) ||
4932 if (DELAY_ACK(tp, tlen) || tfo_syn) {
4933 rack_timer_cancel(tp, rack,
4934 rack->r_ctl.rc_rcvtime, __LINE__);
4935 tp->t_flags |= TF_DELACK;
4937 rack->r_wanted_output++;
4938 tp->t_flags |= TF_ACKNOW;
4940 tp->rcv_nxt += tlen;
4941 thflags = th->th_flags & TH_FIN;
4942 TCPSTAT_ADD(tcps_rcvpack, nsegs);
4943 TCPSTAT_ADD(tcps_rcvbyte, tlen);
4944 SOCKBUF_LOCK(&so->so_rcv);
4945 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
4948 sbappendstream_locked(&so->so_rcv, m, 0);
4949 /* NB: sorwakeup_locked() does an implicit unlock. */
4950 sorwakeup_locked(so);
4953 * XXX: Due to the header drop above "th" is
4954 * theoretically invalid by now. Fortunately
4955			 * m_adj() doesn't actually free any mbufs when
4956 * trimming from the head.
4958 tcp_seq temp = save_start;
4959 thflags = tcp_reass(tp, th, &temp, &tlen, m);
4960 tp->t_flags |= TF_ACKNOW;
4962 if ((tp->t_flags & TF_SACK_PERMIT) && (save_tlen > 0)) {
4963 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
4965 * DSACK actually handled in the fastpath
4968 tcp_update_sack_list(tp, save_start,
4969 save_start + save_tlen);
4970 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
4971 if ((tp->rcv_numsacks >= 1) &&
4972 (tp->sackblks[0].end == save_start)) {
4974 * Partial overlap, recorded at todrop
4977 tcp_update_sack_list(tp,
4978 tp->sackblks[0].start,
4979 tp->sackblks[0].end);
4981 tcp_update_dsack_list(tp, save_start,
4982 save_start + save_tlen);
4984 } else if (tlen >= save_tlen) {
4985 /* Update of sackblks. */
4986 tcp_update_dsack_list(tp, save_start,
4987 save_start + save_tlen);
4988 } else if (tlen > 0) {
4989 tcp_update_dsack_list(tp, save_start,
4999 * If FIN is received ACK the FIN and let the user know that the
5000 * connection is closing.
5002 if (thflags & TH_FIN) {
5003 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
5006			 * If connection is half-synchronized (i.e. NEEDSYN
5007 * flag on) then delay ACK, so it may be piggybacked
5008 * when SYN is sent. Otherwise, since we received a
5009 * FIN then no more input can be expected, send ACK
5012 if (tp->t_flags & TF_NEEDSYN) {
5013 rack_timer_cancel(tp, rack,
5014 rack->r_ctl.rc_rcvtime, __LINE__);
5015 tp->t_flags |= TF_DELACK;
5017 tp->t_flags |= TF_ACKNOW;
5021 switch (tp->t_state) {
5024 * In SYN_RECEIVED and ESTABLISHED STATES enter the
5027 case TCPS_SYN_RECEIVED:
5028 tp->t_starttime = ticks;
5030 case TCPS_ESTABLISHED:
5031 rack_timer_cancel(tp, rack,
5032 rack->r_ctl.rc_rcvtime, __LINE__);
5033 tcp_state_change(tp, TCPS_CLOSE_WAIT);
5037 * If still in FIN_WAIT_1 STATE FIN has not been
5038 * acked so enter the CLOSING state.
5040 case TCPS_FIN_WAIT_1:
5041 rack_timer_cancel(tp, rack,
5042 rack->r_ctl.rc_rcvtime, __LINE__);
5043 tcp_state_change(tp, TCPS_CLOSING);
5047 * In FIN_WAIT_2 state enter the TIME_WAIT state,
5048 * starting the time-wait timer, turning off the
5049 * other standard timers.
5051 case TCPS_FIN_WAIT_2:
5052 rack_timer_cancel(tp, rack,
5053 rack->r_ctl.rc_rcvtime, __LINE__);
5054 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
5060 * Return any desired output.
5062 if ((tp->t_flags & TF_ACKNOW) ||
5063 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) {
5064 rack->r_wanted_output++;
5066 INP_WLOCK_ASSERT(tp->t_inpcb);
5071	 * Here nothing is really faster, it's just that we
5072 * have broken out the fast-data path also just like
5076 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so,
5077 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5078 uint32_t tiwin, int32_t nxt_pkt)
5081 int32_t newsize = 0; /* automatic sockbuf scaling */
5082 struct tcp_rack *rack;
5085 * The size of tcp_saveipgen must be the size of the max ip header,
5088 u_char tcp_saveipgen[IP6_HDR_LEN];
5089 struct tcphdr tcp_savetcp;
5094 * If last ACK falls within this segment's sequence numbers, record
5095 * the timestamp. NOTE that the test is modified according to the
5096 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
5098 if (__predict_false(th->th_seq != tp->rcv_nxt)) {
5101 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
5104 if (tiwin && tiwin != tp->snd_wnd) {
5107 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) {
5110 if (__predict_false((to->to_flags & TOF_TS) &&
5111 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) {
5114 if (__predict_false((th->th_ack != tp->snd_una))) {
5117 if (__predict_false(tlen > sbspace(&so->so_rcv))) {
5120 if ((to->to_flags & TOF_TS) != 0 &&
5121 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
5122 tp->ts_recent_age = tcp_ts_getticks();
5123 tp->ts_recent = to->to_tsval;
5125 rack = (struct tcp_rack *)tp->t_fb_ptr;
5127 * This is a pure, in-sequence data packet with nothing on the
5128 * reassembly queue and we have enough buffer space to take it.
5130 nsegs = max(1, m->m_pkthdr.lro_nsegs);
5133 /* Clean receiver SACK report if present */
5134 /* if (tp->rcv_numsacks)
5135 tcp_clean_sackreport(tp);
5138 TCPSTAT_INC(tcps_preddat);
5139 tp->rcv_nxt += tlen;
5141 * Pull snd_wl1 up to prevent seq wrap relative to th_seq.
5143 tp->snd_wl1 = th->th_seq;
5145 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt.
5147 tp->rcv_up = tp->rcv_nxt;
5148 TCPSTAT_ADD(tcps_rcvpack, nsegs);
5149 TCPSTAT_ADD(tcps_rcvbyte, tlen);
5151 if (so->so_options & SO_DEBUG)
5152 tcp_trace(TA_INPUT, ostate, tp,
5153 (void *)tcp_saveipgen, &tcp_savetcp, 0);
5155 newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
5157 /* Add data to socket buffer. */
5158 SOCKBUF_LOCK(&so->so_rcv);
5159 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5163 * Set new socket buffer size. Give up when limit is
5167 if (!sbreserve_locked(&so->so_rcv,
5169 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
5170 m_adj(m, drop_hdrlen); /* delayed header drop */
5171 sbappendstream_locked(&so->so_rcv, m, 0);
5172 rack_calc_rwin(so, tp);
5174 /* NB: sorwakeup_locked() does an implicit unlock. */
5175 sorwakeup_locked(so);
5176 if (DELAY_ACK(tp, tlen)) {
5177 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
5178 tp->t_flags |= TF_DELACK;
5180 tp->t_flags |= TF_ACKNOW;
5181 rack->r_wanted_output++;
5183 if ((tp->snd_una == tp->snd_max) && rack_use_sack_filter)
5184 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
5189 * This subfunction is used to try to highly optimize the
5190 * fast path. We again allow window updates that are
5191 * in sequence to remain in the fast-path. We also add
5192 * in the __predict's to attempt to help the compiler.
5193 * Note that if we return a 0, then we can *not* process
5194 * it and the caller should push the packet into the
5198 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
5199 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5200 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts)
5207 * The size of tcp_saveipgen must be the size of the max ip header,
5210 u_char tcp_saveipgen[IP6_HDR_LEN];
5211 struct tcphdr tcp_savetcp;
5215 struct tcp_rack *rack;
5217 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
5218 /* Old ack, behind (or duplicate to) the last one rcv'd */
5221 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) {
5222 /* Above what we have sent? */
5225 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
5226 /* We are retransmitting */
5229 if (__predict_false(tiwin == 0)) {
5233 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) {
5234		/* We need a SYN or a FIN, unlikely... */
5237 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) {
5238		/* Timestamp is behind... old ack with seq wrap? */
5241 if (__predict_false(IN_RECOVERY(tp->t_flags))) {
5242 /* Still recovering */
5245 rack = (struct tcp_rack *)tp->t_fb_ptr;
5246 if (rack->r_ctl.rc_sacked) {
5247 /* We have sack holes on our scoreboard */
5250 /* Ok if we reach here, we can process a fast-ack */
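	/*
	 * Informal recap of the guards above: the fast path handles only
	 * a pure in-window ACK with a non-zero window and sane timestamp,
	 * while we are not retransmitting, not in recovery, and hold no
	 * SACKed data on the scoreboard; everything else takes the slow
	 * path.
	 */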
5251 nsegs = max(1, m->m_pkthdr.lro_nsegs);
5252 rack_log_ack(tp, to, th);
5253 /* Did the window get updated? */
5254 if (tiwin != tp->snd_wnd) {
5255 tp->snd_wnd = tiwin;
5256 tp->snd_wl1 = th->th_seq;
5257 if (tp->snd_wnd > tp->max_sndwnd)
5258 tp->max_sndwnd = tp->snd_wnd;
5260 if ((rack->rc_in_persist != 0) && (tp->snd_wnd >= tp->t_maxseg)) {
5261 rack_exit_persist(tp, rack);
5264 * If last ACK falls within this segment's sequence numbers, record
5265 * the timestamp. NOTE that the test is modified according to the
5266 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
5268 if ((to->to_flags & TOF_TS) != 0 &&
5269 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
5270 tp->ts_recent_age = tcp_ts_getticks();
5271 tp->ts_recent = to->to_tsval;
5274 * This is a pure ack for outstanding data.
5276 TCPSTAT_INC(tcps_predack);
5279 * "bad retransmit" recovery.
5281 if (tp->t_flags & TF_PREVVALID) {
5282 tp->t_flags &= ~TF_PREVVALID;
5283 if (tp->t_rxtshift == 1 &&
5284 (int)(ticks - tp->t_badrxtwin) < 0)
5285 rack_cong_signal(tp, th, CC_RTO_ERR);
5288 * Recalculate the transmit timer / rtt.
5290 * Some boxes send broken timestamp replies during the SYN+ACK
5291 * phase, ignore timestamps of 0 or we could calculate a huge RTT
5292 * and blow up the retransmit timer.
5294 acked = BYTES_THIS_ACK(tp, th);
5297 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
5298 hhook_run_tcp_est_in(tp, th, to);
5301 TCPSTAT_ADD(tcps_rcvackpack, nsegs);
5302 TCPSTAT_ADD(tcps_rcvackbyte, acked);
5303 sbdrop(&so->so_snd, acked);
5305 * Let the congestion control algorithm update congestion control
5306 * related information. This typically means increasing the
5307 * congestion window.
5309 rack_ack_received(tp, rack, th, nsegs, CC_ACK, 0);
5311 tp->snd_una = th->th_ack;
5313 * Pull snd_wl2 up to prevent seq wrap relative to th_ack.
5315 tp->snd_wl2 = th->th_ack;
5318 /* ND6_HINT(tp); *//* Some progress has been made. */
5321 * If all outstanding data are acked, stop retransmit timer,
5322 * otherwise restart timer using current (possibly backed-off)
5323 * value. If process is waiting for space, wakeup/selwakeup/signal.
5324 * If data are ready to send, let tcp_output decide between more
5325 * output or persist.
5328 if (so->so_options & SO_DEBUG)
5329 tcp_trace(TA_INPUT, ostate, tp,
5330 (void *)tcp_saveipgen,
5333 if (tp->snd_una == tp->snd_max) {
5334 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
5336 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
5338 /* Wake up the socket if we have room to write more */
5340 if (sbavail(&so->so_snd)) {
5341 rack->r_wanted_output++;
5347 * Return value of 1, the TCB is unlocked and most
5348 * likely gone, return value of 0, the TCP is still
5352 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
5353 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5354 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5356 int32_t ret_val = 0;
5358 int32_t ourfinisacked = 0;
5360 rack_calc_rwin(so, tp);
5362 * If the state is SYN_SENT: if seg contains an ACK, but not for our
5363 * SYN, drop the input. if seg contains a RST, then drop the
5364 * connection. if seg does not contain SYN, then drop it. Otherwise
5365 * this is an acceptable SYN segment initialize tp->rcv_nxt and
5366 * tp->irs if seg contains ack then advance tp->snd_una if seg
5367 * contains an ECE and ECN support is enabled, the stream is ECN
5368 * capable. if SYN has been acked change to ESTABLISHED else
5369 * SYN_RCVD state arrange for segment to be acked (eventually)
5370 * continue processing rest of data/controls, beginning with URG
5372 if ((thflags & TH_ACK) &&
5373 (SEQ_LEQ(th->th_ack, tp->iss) ||
5374 SEQ_GT(th->th_ack, tp->snd_max))) {
5375 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
5378 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
5379 TCP_PROBE5(connect__refused, NULL, tp,
5380 mtod(m, const char *), tp, th);
5381 tp = tcp_drop(tp, ECONNREFUSED);
5382 rack_do_drop(m, tp);
5385 if (thflags & TH_RST) {
5386 rack_do_drop(m, tp);
5389 if (!(thflags & TH_SYN)) {
5390 rack_do_drop(m, tp);
5393 tp->irs = th->th_seq;
5395 if (thflags & TH_ACK) {
5396 TCPSTAT_INC(tcps_connects);
5399 mac_socketpeer_set_from_mbuf(m, so);
5401 /* Do window scaling on this connection? */
5402 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
5403 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
5404 tp->rcv_scale = tp->request_r_scale;
5406 tp->rcv_adv += min(tp->rcv_wnd,
5407 TCP_MAXWIN << tp->rcv_scale);
5409 * If there's data, delay ACK; if there's also a FIN ACKNOW
5410 * will be turned on later.
5412 if (DELAY_ACK(tp, tlen) && tlen != 0) {
5413 rack_timer_cancel(tp, (struct tcp_rack *)tp->t_fb_ptr,
5414 ((struct tcp_rack *)tp->t_fb_ptr)->r_ctl.rc_rcvtime, __LINE__);
5415 tp->t_flags |= TF_DELACK;
5417 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++;
5418 tp->t_flags |= TF_ACKNOW;
5421 if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) &&
5422 (V_tcp_do_ecn == 1)) {
5423 tp->t_flags |= TF_ECN_PERMIT;
5424 TCPSTAT_INC(tcps_ecn_shs);
5427 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions:
5428 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1
5430 tp->t_starttime = ticks;
5431 if (tp->t_flags & TF_NEEDFIN) {
5432 tcp_state_change(tp, TCPS_FIN_WAIT_1);
5433 tp->t_flags &= ~TF_NEEDFIN;
5436 tcp_state_change(tp, TCPS_ESTABLISHED);
5437 TCP_PROBE5(connect__established, NULL, tp,
5438 mtod(m, const char *), tp, th);
5443 * Received initial SYN in SYN-SENT[*] state => simultaneous
5444 * open. If segment contains CC option and there is a
5445 * cached CC, apply TAO test. If it succeeds, connection is *
5446 * half-synchronized. Otherwise, do 3-way handshake:
5447 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If
5448 * there was no CC option, clear cached CC value.
5450 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
5451 tcp_state_change(tp, TCPS_SYN_RECEIVED);
5453 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
5454 INP_WLOCK_ASSERT(tp->t_inpcb);
5456 * Advance th->th_seq to correspond to first data byte. If data,
5457 * trim to stay within window, dropping FIN if necessary.
5460 if (tlen > tp->rcv_wnd) {
5461 todrop = tlen - tp->rcv_wnd;
5465 TCPSTAT_INC(tcps_rcvpackafterwin);
5466 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
5468 tp->snd_wl1 = th->th_seq - 1;
5469 tp->rcv_up = th->th_seq;
5471 * Client side of transaction: already sent SYN and data. If the
5472 * remote host used T/TCP to validate the SYN, our data will be
5473 * ACK'd; if so, enter normal data segment processing in the middle
5474 * of step 5, ack processing. Otherwise, goto step 6.
5476 if (thflags & TH_ACK) {
5477 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val))
5479 /* We may have changed to FIN_WAIT_1 above */
5480 if (tp->t_state == TCPS_FIN_WAIT_1) {
5482 * In FIN_WAIT_1 STATE in addition to the processing
5483 * for the ESTABLISHED state if our FIN is now
5484 * acknowledged then enter FIN_WAIT_2.
5486 if (ourfinisacked) {
5488 * If we can't receive any more data, then
5489 * closing user can proceed. Starting the
5490 * timer is contrary to the specification,
5491 * but if we don't get a FIN we'll hang
5494 * XXXjl: we should release the tp also, and
5495 * use a compressed state.
5497 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5498 soisdisconnected(so);
5499 tcp_timer_activate(tp, TT_2MSL,
5500 (tcp_fast_finwait2_recycle ?
5501 tcp_finwait2_timeout :
5504 tcp_state_change(tp, TCPS_FIN_WAIT_2);
5508 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5509 tiwin, thflags, nxt_pkt));
5513 * Return value of 1, the TCB is unlocked and most
5514 * likely gone, return value of 0, the TCP is still
5518 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
5519 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5520 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5522 int32_t ret_val = 0;
5523 int32_t ourfinisacked = 0;
5525 rack_calc_rwin(so, tp);
5527 if ((thflags & TH_ACK) &&
5528 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
5529 SEQ_GT(th->th_ack, tp->snd_max))) {
5530 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
5534 if (tp->t_flags & TF_FASTOPEN) {
5536 * When a TFO connection is in SYN_RECEIVED, the only valid
5537 * packets are the initial SYN, a retransmit/copy of the
5538 * initial SYN (possibly with a subset of the original
5539 * data), a valid ACK, a FIN, or a RST.
5541 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
5542 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
5544 } else if (thflags & TH_SYN) {
5545 /* non-initial SYN is ignored */
5546 struct tcp_rack *rack;
5548 rack = (struct tcp_rack *)tp->t_fb_ptr;
5549 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) ||
5550 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) ||
5551 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) {
5552 rack_do_drop(m, NULL);
5555 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) {
5556 rack_do_drop(m, NULL);
5561 if (thflags & TH_RST)
5562 return (rack_process_rst(m, th, so, tp));
5564 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
5565 * synchronized state.
5567 if (thflags & TH_SYN) {
5568 rack_challenge_ack(m, th, tp, &ret_val);
5572 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
5573 * it's less than ts_recent, drop it.
5575 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
5576 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
5577 if (rack_ts_check(m, th, tp, tlen, thflags, &ret_val))
5581 * In the SYN-RECEIVED state, validate that the packet belongs to
5582 * this connection before trimming the data to fit the receive
5583 * window. Check the sequence number versus IRS since we know the
5584 * sequence numbers haven't wrapped. This is a partial fix for the
5585 * "LAND" DoS attack.
5587 if (SEQ_LT(th->th_seq, tp->irs)) {
5588 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
5591 if (rack_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
5595 * If last ACK falls within this segment's sequence numbers, record
5596 * its timestamp. NOTE: 1) That the test incorporates suggestions
5597 * from the latest proposal of the tcplw@cray.com list (Braden
5598 * 1993/04/26). 2) That updating only on newer timestamps interferes
5599 * with our earlier PAWS tests, so this check should be solely
5600 * predicated on the sequence space of this segment. 3) That we
5601 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
5602 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
5603 * SEG.Len, This modified check allows us to overcome RFC1323's
5604 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
5605 * p.869. In such cases, we can still calculate the RTT correctly
5606 * when RCV.NXT == Last.ACK.Sent.
5608 if ((to->to_flags & TOF_TS) != 0 &&
5609 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
5610 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
5611 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
5612 tp->ts_recent_age = tcp_ts_getticks();
5613 tp->ts_recent = to->to_tsval;
5616 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
5617 * is on (half-synchronized state), then queue data for later
5618 * processing; else drop segment and return.
5620 if ((thflags & TH_ACK) == 0) {
5622 if (tp->t_flags & TF_FASTOPEN) {
5623 tp->snd_wnd = tiwin;
5627 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5628 tiwin, thflags, nxt_pkt));
5630 TCPSTAT_INC(tcps_connects);
5632 /* Do window scaling? */
5633 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
5634 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
5635 tp->rcv_scale = tp->request_r_scale;
5636 tp->snd_wnd = tiwin;
5639 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* ->
5642 tp->t_starttime = ticks;
5643 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) {
5644 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
5645 tp->t_tfo_pending = NULL;
5647 if (tp->t_flags & TF_NEEDFIN) {
5648 tcp_state_change(tp, TCPS_FIN_WAIT_1);
5649 tp->t_flags &= ~TF_NEEDFIN;
5651 tcp_state_change(tp, TCPS_ESTABLISHED);
5652 TCP_PROBE5(accept__established, NULL, tp,
5653 mtod(m, const char *), tp, th);
5655 if (tp->t_tfo_pending) {
5656 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
5657 tp->t_tfo_pending = NULL;
5660 * Account for the ACK of our SYN prior to regular
5661 * ACK processing below.
5666 * TFO connections call cc_conn_init() during SYN
5667 * processing. Calling it again here for such connections
5668 * is not harmless as it would undo the snd_cwnd reduction
5669 * that occurs when a TFO SYN|ACK is retransmitted.
5671 if (!(tp->t_flags & TF_FASTOPEN))
5676 * Account for the ACK of our SYN prior to
5677 * regular ACK processing below, except for
5678 * simultaneous SYN, which is handled later.
5680 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN))
5683 * If segment contains data or ACK, will call tcp_reass() later; if
5684 * not, do so now to pass queued data to user.
5686 if (tlen == 0 && (thflags & TH_FIN) == 0)
5687 (void)tcp_reass(tp, (struct tcphdr *)0, NULL, 0,
5689 tp->snd_wl1 = th->th_seq - 1;
5690 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
5693 if (tp->t_state == TCPS_FIN_WAIT_1) {
5694	/* We could have gone to FIN_WAIT_1 (or EST) above */
5696 * In FIN_WAIT_1 STATE in addition to the processing for the
5697 * ESTABLISHED state if our FIN is now acknowledged then
5700 if (ourfinisacked) {
5702 * If we can't receive any more data, then closing
5703 * user can proceed. Starting the timer is contrary
5704 * to the specification, but if we don't get a FIN
5705 * we'll hang forever.
5707 * XXXjl: we should release the tp also, and use a
5710 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5711 soisdisconnected(so);
5712 tcp_timer_activate(tp, TT_2MSL,
5713 (tcp_fast_finwait2_recycle ?
5714 tcp_finwait2_timeout :
5717 tcp_state_change(tp, TCPS_FIN_WAIT_2);
5720 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5721 tiwin, thflags, nxt_pkt));
5725 * Return value of 1, the TCB is unlocked and most
5726 * likely gone, return value of 0, the TCP is still
5730 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
5731 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5732 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5734 int32_t ret_val = 0;
5737 * Header prediction: check for the two common cases of a
5738 * uni-directional data xfer. If the packet has no control flags,
5739 * is in-sequence, the window didn't change and we're not
5740 * retransmitting, it's a candidate. If the length is zero and the
5741 * ack moved forward, we're the sender side of the xfer. Just free
5742 * the data acked & wake any higher level process that was blocked
5743 * waiting for space. If the length is non-zero and the ack didn't
5744 * move, we're the receiver side. If we're getting packets in-order
5745	 * (the reassembly queue is empty), add the data to the socket
5746 * buffer and note that we need a delayed ack. Make sure that the
5747 * hidden state-flags are also off. Since we check for
5748 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN.
5750 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) &&
5751 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_URG | TH_ACK)) == TH_ACK) &&
5752 __predict_true(SEGQ_EMPTY(tp)) &&
5753 __predict_true(th->th_seq == tp->rcv_nxt)) {
5754 struct tcp_rack *rack;
5756 rack = (struct tcp_rack *)tp->t_fb_ptr;
5758 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen,
5759 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) {
5763 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen,
5769 rack_calc_rwin(so, tp);
5771 if (thflags & TH_RST)
5772 return (rack_process_rst(m, th, so, tp));
5775 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
5776 * synchronized state.
5778 if (thflags & TH_SYN) {
5779 rack_challenge_ack(m, th, tp, &ret_val);
5783 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
5784 * it's less than ts_recent, drop it.
5786 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
5787 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
5788 if (rack_ts_check(m, th, tp, tlen, thflags, &ret_val))
5791 if (rack_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
5795 * If last ACK falls within this segment's sequence numbers, record
5796 * its timestamp. NOTE: 1) That the test incorporates suggestions
5797 * from the latest proposal of the tcplw@cray.com list (Braden
5798 * 1993/04/26). 2) That updating only on newer timestamps interferes
5799 * with our earlier PAWS tests, so this check should be solely
5800 * predicated on the sequence space of this segment. 3) That we
5801 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
5802 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
5803 * SEG.Len, This modified check allows us to overcome RFC1323's
5804 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
5805 * p.869. In such cases, we can still calculate the RTT correctly
5806 * when RCV.NXT == Last.ACK.Sent.
5808 if ((to->to_flags & TOF_TS) != 0 &&
5809 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
5810 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
5811 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
5812 tp->ts_recent_age = tcp_ts_getticks();
5813 tp->ts_recent = to->to_tsval;
5816 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
5817 * is on (half-synchronized state), then queue data for later
5818 * processing; else drop segment and return.
5820 if ((thflags & TH_ACK) == 0) {
5821 if (tp->t_flags & TF_NEEDSYN) {
5823 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5824 tiwin, thflags, nxt_pkt));
5826 } else if (tp->t_flags & TF_ACKNOW) {
5827 rack_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
5830 rack_do_drop(m, NULL);
5837 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
5840 if (sbavail(&so->so_snd)) {
5841 if (rack_progress_timeout_check(tp)) {
5842 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
5843 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
5847 /* State changes only happen in rack_process_data() */
5848 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5849 tiwin, thflags, nxt_pkt));
5853 * Return value of 1, the TCB is unlocked and most
5854 * likely gone, return value of 0, the TCP is still
5858 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
5859 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5860 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5862 int32_t ret_val = 0;
5864 rack_calc_rwin(so, tp);
5865 if (thflags & TH_RST)
5866 return (rack_process_rst(m, th, so, tp));
5868 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
5869 * synchronized state.
5871 if (thflags & TH_SYN) {
5872 rack_challenge_ack(m, th, tp, &ret_val);
5876 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
5877 * it's less than ts_recent, drop it.
5879 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
5880 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
5881 if (rack_ts_check(m, th, tp, tlen, thflags, &ret_val))
5884 if (rack_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
5888 * If last ACK falls within this segment's sequence numbers, record
5889 * its timestamp. NOTE: 1) That the test incorporates suggestions
5890 * from the latest proposal of the tcplw@cray.com list (Braden
5891 * 1993/04/26). 2) That updating only on newer timestamps interferes
5892 * with our earlier PAWS tests, so this check should be solely
5893 * predicated on the sequence space of this segment. 3) That we
5894 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
5895 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
5896 * SEG.Len, This modified check allows us to overcome RFC1323's
5897 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
5898 * p.869. In such cases, we can still calculate the RTT correctly
5899 * when RCV.NXT == Last.ACK.Sent.
5901 if ((to->to_flags & TOF_TS) != 0 &&
5902 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
5903 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
5904 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
5905 tp->ts_recent_age = tcp_ts_getticks();
5906 tp->ts_recent = to->to_tsval;
5909 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
5910 * is on (half-synchronized state), then queue data for later
5911 * processing; else drop segment and return.
5913 if ((thflags & TH_ACK) == 0) {
5914 if (tp->t_flags & TF_NEEDSYN) {
5915 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5916 tiwin, thflags, nxt_pkt));
5918 } else if (tp->t_flags & TF_ACKNOW) {
5919 rack_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
5922 rack_do_drop(m, NULL);
5929 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
5932 if (sbavail(&so->so_snd)) {
5933 if (rack_progress_timeout_check(tp)) {
5934 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
5935 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
5939 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5940 tiwin, thflags, nxt_pkt));
5944 rack_check_data_after_close(struct mbuf *m,
5945 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so)
5947 struct tcp_rack *rack;
5949 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
5950 rack = (struct tcp_rack *)tp->t_fb_ptr;
5951 if (rack->rc_allow_data_af_clo == 0) {
5954 TCPSTAT_INC(tcps_rcvafterclose);
5955 rack_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen));
5958 if (sbavail(&so->so_snd) == 0)
5960 /* OK, we allow data that is ignored and a follow-up reset */
5961 tp->rcv_nxt = th->th_seq + *tlen;
5962 tp->t_flags2 |= TF2_DROP_AF_DATA;
5963 rack->r_wanted_output = 1;
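/*
 * At this point the late-arriving data is ignored but accounted for:
 * rcv_nxt is advanced past it so the follow-up reset sent once
 * TF2_DROP_AF_DATA is observed carries the right sequence, and
 * r_wanted_output forces a pass through rack_output().
 */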
5969 * Return value of 1, the TCB is unlocked and most
5970 * likely gone; return value of 0, the TCP is still
5971 * locked.
5974 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
5975 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5976 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5978 int32_t ret_val = 0;
5979 int32_t ourfinisacked = 0;
5981 rack_calc_rwin(so, tp);
5983 if (thflags & TH_RST)
5984 return (rack_process_rst(m, th, so, tp));
5986 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
5987 * synchronized state.
5989 if (thflags & TH_SYN) {
5990 rack_challenge_ack(m, th, tp, &ret_val);
5994 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
5995 * it's less than ts_recent, drop it.
5997 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
5998 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
5999 if (rack_ts_check(m, th, tp, tlen, thflags, &ret_val))
6002 if (rack_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
6006 * If new data are received on a connection after the user processes
6007 * are gone, then RST the other end.
6009 if ((so->so_state & SS_NOFDREF) && tlen) {
6010 if (rack_check_data_after_close(m, tp, &tlen, th, so))
6014 * If last ACK falls within this segment's sequence numbers, record
6015 * its timestamp. NOTE: 1) That the test incorporates suggestions
6016 * from the latest proposal of the tcplw@cray.com list (Braden
6017 * 1993/04/26). 2) That updating only on newer timestamps interferes
6018 * with our earlier PAWS tests, so this check should be solely
6019 * predicated on the sequence space of this segment. 3) That we
6020 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
6021 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
6022 * SEG.Len. This modified check allows us to overcome RFC1323's
6023 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
6024 * p.869. In such cases, we can still calculate the RTT correctly
6025 * when RCV.NXT == Last.ACK.Sent.
6027 if ((to->to_flags & TOF_TS) != 0 &&
6028 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
6029 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
6030 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
6031 tp->ts_recent_age = tcp_ts_getticks();
6032 tp->ts_recent = to->to_tsval;
6035 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
6036 * is on (half-synchronized state), then queue data for later
6037 * processing; else drop segment and return.
6039 if ((thflags & TH_ACK) == 0) {
6040 if (tp->t_flags & TF_NEEDSYN) {
6041 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6042 tiwin, thflags, nxt_pkt));
6043 } else if (tp->t_flags & TF_ACKNOW) {
6044 rack_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
6047 rack_do_drop(m, NULL);
6054 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
6057 if (ourfinisacked) {
6059 * If we can't receive any more data, then closing user can
6060 * proceed. Starting the timer is contrary to the
6061 * specification, but if we don't get a FIN we'll hang
6062 * forever.
6064 * XXXjl: we should release the tp also, and use a
6065 * compressed state.
6067 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6068 soisdisconnected(so);
6069 tcp_timer_activate(tp, TT_2MSL,
6070 (tcp_fast_finwait2_recycle ?
6071 tcp_finwait2_timeout :
6072 TP_MAXIDLE(tp)));
6073 }
6074 tcp_state_change(tp, TCPS_FIN_WAIT_2);
6076 if (sbavail(&so->so_snd)) {
6077 if (rack_progress_timeout_check(tp)) {
6078 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
6079 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
6083 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6084 tiwin, thflags, nxt_pkt));
6088 * Return value of 1, the TCB is unlocked and most
6089 * likely gone; return value of 0, the TCP is still
6090 * locked.
6093 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
6094 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
6095 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
6097 int32_t ret_val = 0;
6098 int32_t ourfinisacked = 0;
6100 rack_calc_rwin(so, tp);
6102 if (thflags & TH_RST)
6103 return (rack_process_rst(m, th, so, tp));
6105 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
6106 * synchronized state.
6108 if (thflags & TH_SYN) {
6109 rack_challenge_ack(m, th, tp, &ret_val);
6113 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
6114 * it's less than ts_recent, drop it.
6116 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
6117 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
6118 if (rack_ts_check(m, th, tp, tlen, thflags, &ret_val))
6121 if (rack_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
6125 * If new data are received on a connection after the user processes
6126 * are gone, then RST the other end.
6128 if ((so->so_state & SS_NOFDREF) && tlen) {
6129 if (rack_check_data_after_close(m, tp, &tlen, th, so))
6133 * If last ACK falls within this segment's sequence numbers, record
6134 * its timestamp. NOTE: 1) That the test incorporates suggestions
6135 * from the latest proposal of the tcplw@cray.com list (Braden
6136 * 1993/04/26). 2) That updating only on newer timestamps interferes
6137 * with our earlier PAWS tests, so this check should be solely
6138 * predicated on the sequence space of this segment. 3) That we
6139 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
6140 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
6141 * SEG.Len. This modified check allows us to overcome RFC1323's
6142 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
6143 * p.869. In such cases, we can still calculate the RTT correctly
6144 * when RCV.NXT == Last.ACK.Sent.
6146 if ((to->to_flags & TOF_TS) != 0 &&
6147 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
6148 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
6149 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
6150 tp->ts_recent_age = tcp_ts_getticks();
6151 tp->ts_recent = to->to_tsval;
6154 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
6155 * is on (half-synchronized state), then queue data for later
6156 * processing; else drop segment and return.
6158 if ((thflags & TH_ACK) == 0) {
6159 if (tp->t_flags & TF_NEEDSYN) {
6160 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6161 tiwin, thflags, nxt_pkt));
6162 } else if (tp->t_flags & TF_ACKNOW) {
6163 rack_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
6166 rack_do_drop(m, NULL);
6173 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
6176 if (ourfinisacked) {
6177 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
6182 if (sbavail(&so->so_snd)) {
6183 if (rack_progress_timeout_check(tp)) {
6184 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
6185 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
6189 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6190 tiwin, thflags, nxt_pkt));
6194 * Return value of 1, the TCB is unlocked and most
6195 * likely gone; return value of 0, the TCP is still
6196 * locked.
6199 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
6200 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
6201 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
6203 int32_t ret_val = 0;
6204 int32_t ourfinisacked = 0;
6206 rack_calc_rwin(so, tp);
6208 if (thflags & TH_RST)
6209 return (rack_process_rst(m, th, so, tp));
6211 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
6212 * synchronized state.
6214 if (thflags & TH_SYN) {
6215 rack_challenge_ack(m, th, tp, &ret_val);
6219 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
6220 * it's less than ts_recent, drop it.
6222 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
6223 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
6224 if (rack_ts_check(m, th, tp, tlen, thflags, &ret_val))
6227 if (rack_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
6231 * If new data are received on a connection after the user processes
6232 * are gone, then RST the other end.
6234 if ((so->so_state & SS_NOFDREF) && tlen) {
6235 if (rack_check_data_after_close(m, tp, &tlen, th, so))
6239 * If last ACK falls within this segment's sequence numbers, record
6240 * its timestamp. NOTE: 1) That the test incorporates suggestions
6241 * from the latest proposal of the tcplw@cray.com list (Braden
6242 * 1993/04/26). 2) That updating only on newer timestamps interferes
6243 * with our earlier PAWS tests, so this check should be solely
6244 * predicated on the sequence space of this segment. 3) That we
6245 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
6246 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
6247 * SEG.Len. This modified check allows us to overcome RFC1323's
6248 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
6249 * p.869. In such cases, we can still calculate the RTT correctly
6250 * when RCV.NXT == Last.ACK.Sent.
6252 if ((to->to_flags & TOF_TS) != 0 &&
6253 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
6254 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
6255 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
6256 tp->ts_recent_age = tcp_ts_getticks();
6257 tp->ts_recent = to->to_tsval;
6260 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
6261 * is on (half-synchronized state), then queue data for later
6262 * processing; else drop segment and return.
6264 if ((thflags & TH_ACK) == 0) {
6265 if (tp->t_flags & TF_NEEDSYN) {
6266 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6267 tiwin, thflags, nxt_pkt));
6268 } else if (tp->t_flags & TF_ACKNOW) {
6269 rack_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
6272 rack_do_drop(m, NULL);
6277 * case TCPS_LAST_ACK: Ack processing.
6279 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
6282 if (ourfinisacked) {
6283 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
6285 rack_do_drop(m, tp);
6288 if (sbavail(&so->so_snd)) {
6289 if (rack_progress_timeout_check(tp)) {
6290 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
6291 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
6295 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6296 tiwin, thflags, nxt_pkt));
6301 * Return value of 1, the TCB is unlocked and most
6302 * likely gone; return value of 0, the TCP is still
6303 * locked.
6306 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
6307 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
6308 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
6310 int32_t ret_val = 0;
6311 int32_t ourfinisacked = 0;
6313 rack_calc_rwin(so, tp);
6316 if (thflags & TH_RST)
6317 return (rack_process_rst(m, th, so, tp));
6319 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
6320 * synchronized state.
6322 if (thflags & TH_SYN) {
6323 rack_challenge_ack(m, th, tp, &ret_val);
6327 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
6328 * it's less than ts_recent, drop it.
6330 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
6331 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
6332 if (rack_ts_check(m, th, tp, tlen, thflags, &ret_val))
6335 if (rack_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
6339 * If new data are received on a connection after the user processes
6340 * are gone, then RST the other end.
6342 if ((so->so_state & SS_NOFDREF) &&
6343 tlen) {
6344 if (rack_check_data_after_close(m, tp, &tlen, th, so))
6348 * If last ACK falls within this segment's sequence numbers, record
6349 * its timestamp. NOTE: 1) That the test incorporates suggestions
6350 * from the latest proposal of the tcplw@cray.com list (Braden
6351 * 1993/04/26). 2) That updating only on newer timestamps interferes
6352 * with our earlier PAWS tests, so this check should be solely
6353 * predicated on the sequence space of this segment. 3) That we
6354 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
6355 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
6356 * SEG.Len. This modified check allows us to overcome RFC1323's
6357 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
6358 * p.869. In such cases, we can still calculate the RTT correctly
6359 * when RCV.NXT == Last.ACK.Sent.
6361 if ((to->to_flags & TOF_TS) != 0 &&
6362 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
6363 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
6364 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
6365 tp->ts_recent_age = tcp_ts_getticks();
6366 tp->ts_recent = to->to_tsval;
6369 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
6370 * is on (half-synchronized state), then queue data for later
6371 * processing; else drop segment and return.
6373 if ((thflags & TH_ACK) == 0) {
6374 if (tp->t_flags & TF_NEEDSYN) {
6375 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6376 tiwin, thflags, nxt_pkt));
6377 } else if (tp->t_flags & TF_ACKNOW) {
6378 rack_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
6381 rack_do_drop(m, NULL);
6388 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
6391 if (sbavail(&so->so_snd)) {
6392 if (rack_progress_timeout_check(tp)) {
6393 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
6394 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
6398 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6399 tiwin, thflags, nxt_pkt));
6404 rack_clear_rate_sample(struct tcp_rack *rack)
6406 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY;
6407 rack->r_ctl.rack_rs.rs_rtt_cnt = 0;
6408 rack->r_ctl.rack_rs.rs_rtt_tot = 0;
6412 rack_init(struct tcpcb *tp)
6414 struct tcp_rack *rack = NULL;
6416 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT);
6417 if (tp->t_fb_ptr == NULL) {
6419 * We need to allocate memory but can't. The INP and INP_INFO
6420 * locks are held and they are recursive (this happens during
6421 * setup), so a scheme to drop the locks fails :(
6426 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack));
6428 rack = (struct tcp_rack *)tp->t_fb_ptr;
6429 TAILQ_INIT(&rack->r_ctl.rc_map);
6430 TAILQ_INIT(&rack->r_ctl.rc_free);
6431 TAILQ_INIT(&rack->r_ctl.rc_tmap);
6434 rack->rc_inp = tp->t_inpcb;
6436 /* Probably not needed, but let's be sure */
6437 rack_clear_rate_sample(rack);
6439 rack->r_ctl.rc_reorder_fade = rack_reorder_fade;
6440 rack->rc_allow_data_af_clo = rack_ignore_data_after_close;
6441 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh;
6442 rack->rc_pace_reduce = rack_slot_reduction;
6443 if (V_tcp_delack_enabled)
6444 tp->t_delayed_ack = 1;
6445 else
6446 tp->t_delayed_ack = 0;
6447 rack->rc_pace_max_segs = rack_hptsi_segments;
6448 rack->r_ctl.rc_early_recovery_segs = rack_early_recovery_max_seg;
6449 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh;
6450 rack->r_ctl.rc_pkt_delay = rack_pkt_delay;
6451 rack->r_ctl.rc_prop_reduce = rack_use_proportional_reduce;
6452 rack->r_idle_reduce_largest = rack_reduce_largest_on_idle;
6453 rack->r_enforce_min_pace = rack_min_pace_time;
6454 rack->r_min_pace_seg_thresh = rack_min_pace_time_seg_req;
6455 rack->r_ctl.rc_prop_rate = rack_proportional_rate;
6456 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp;
6457 rack->r_ctl.rc_early_recovery = rack_early_recovery;
6458 rack->rc_always_pace = rack_pace_every_seg;
6459 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method;
6460 rack->rack_tlp_threshold_use = rack_tlp_threshold_use;
6461 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr;
6462 rack->r_ctl.rc_min_to = rack_min_to;
6463 rack->r_ctl.rc_prr_inc_var = rack_inc_var;
6464 if (tp->snd_una != tp->snd_max) {
6465 /* Create a send map for the current outstanding data */
6466 struct rack_sendmap *rsm;
6468 rsm = rack_alloc(rack);
6469 if (rsm == NULL) {
6470 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
6471 tp->t_fb_ptr = NULL;
6472 return (ENOMEM);
6473 }
6474 rsm->r_flags = RACK_OVERMAX;
6475 rsm->r_tim_lastsent[0] = tcp_ts_getticks();
6477 rsm->r_rtr_bytes = 0;
6478 rsm->r_start = tp->snd_una;
6479 rsm->r_end = tp->snd_max;
6481 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_map, rsm, r_next);
6482 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
6485 rack_stop_all_timers(tp);
6486 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), __LINE__, 0, 0, 0);
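/*
 * Note: rack_init() runs when a connection is handed to this stack,
 * e.g. via the TCP_FUNCTION_BLK socket option or the
 * net.inet.tcp.functions_default sysctl; any data already in flight
 * at handoff has been captured in the send map above.
 */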
6491 rack_handoff_ok(struct tcpcb *tp)
6493 if ((tp->t_state == TCPS_CLOSED) ||
6494 (tp->t_state == TCPS_LISTEN)) {
6495 /* Sure no problem though it may not stick */
6498 if ((tp->t_state == TCPS_SYN_SENT) ||
6499 (tp->t_state == TCPS_SYN_RECEIVED)) {
6501 * We really don't know; you have to get to ESTAB or beyond
6502 * to tell.
6506 if (tp->t_flags & TF_SACK_PERMIT) {
6510 * If we reach here we don't do SACK on this connection, so we can
6511 * never do rack.
6517 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
6520 struct tcp_rack *rack;
6521 struct rack_sendmap *rsm;
6523 rack = (struct tcp_rack *)tp->t_fb_ptr;
6525 tcp_log_flowend(tp);
6527 rsm = TAILQ_FIRST(&rack->r_ctl.rc_map);
6529 TAILQ_REMOVE(&rack->r_ctl.rc_map, rsm, r_next);
6530 uma_zfree(rack_zone, rsm);
6531 rsm = TAILQ_FIRST(&rack->r_ctl.rc_map);
6533 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
6535 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_next);
6536 uma_zfree(rack_zone, rsm);
6537 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
6539 rack->rc_free_cnt = 0;
6540 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
6541 tp->t_fb_ptr = NULL;
6543 /* Make sure snd_nxt is correctly set */
6544 tp->snd_nxt = tp->snd_max;
6548 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack)
6550 switch (tp->t_state) {
6552 rack->r_state = TCPS_SYN_SENT;
6553 rack->r_substate = rack_do_syn_sent;
6555 case TCPS_SYN_RECEIVED:
6556 rack->r_state = TCPS_SYN_RECEIVED;
6557 rack->r_substate = rack_do_syn_recv;
6559 case TCPS_ESTABLISHED:
6560 rack->r_state = TCPS_ESTABLISHED;
6561 rack->r_substate = rack_do_established;
6563 case TCPS_CLOSE_WAIT:
6564 rack->r_state = TCPS_CLOSE_WAIT;
6565 rack->r_substate = rack_do_close_wait;
6567 case TCPS_FIN_WAIT_1:
6568 rack->r_state = TCPS_FIN_WAIT_1;
6569 rack->r_substate = rack_do_fin_wait_1;
6572 rack->r_state = TCPS_CLOSING;
6573 rack->r_substate = rack_do_closing;
6576 rack->r_state = TCPS_LAST_ACK;
6577 rack->r_substate = rack_do_lastack;
6579 case TCPS_FIN_WAIT_2:
6580 rack->r_state = TCPS_FIN_WAIT_2;
6581 rack->r_substate = rack_do_fin_wait_2;
6585 case TCPS_TIME_WAIT:
6593 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
6596 * We received an ack, and then did not
6597 * call send, or were bounced out because the
6598 * hpts was running. Now a timer is up as well; is
6599 * it the right timer?
6601 struct rack_sendmap *rsm;
6604 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
6605 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT))
6607 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6608 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) &&
6609 (tmr_up == PACE_TMR_RXT)) {
6610 /* Should be an RXT */
6614 /* Nothing outstanding? */
6615 if (tp->t_flags & TF_DELACK) {
6616 if (tmr_up == PACE_TMR_DELACK)
6617 /* We are supposed to have delayed ack up and we do */
6619 } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) {
6621 * if we hit enobufs then we would expect the possibility
6622 * of nothing outstanding and the RXT up (and the hptsi timer).
6625 } else if (((V_tcp_always_keepalive ||
6626 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
6627 (tp->t_state <= TCPS_CLOSING)) &&
6628 (tmr_up == PACE_TMR_KEEP) &&
6629 (tp->snd_max == tp->snd_una)) {
6630 /* We should have keep alive up and we do */
6634 if (rsm && (rsm->r_flags & RACK_SACK_PASSED)) {
6635 if ((tp->t_flags & TF_SENTFIN) &&
6636 ((tp->snd_max - tp->snd_una) == 1) &&
6637 (rsm->r_flags & RACK_HAS_FIN)) {
6638 /* needs to be a RXT */
6639 if (tmr_up == PACE_TMR_RXT)
6641 } else if (tmr_up == PACE_TMR_RACK)
6643 } else if (SEQ_GT(tp->snd_max,tp->snd_una) &&
6644 ((tmr_up == PACE_TMR_TLP) ||
6645 (tmr_up == PACE_TMR_RXT))) {
6647 * Either a TLP or RXT is fine if no sack-passed
6648 * is in place and data is outstanding.
6651 } else if (tmr_up == PACE_TMR_DELACK) {
6653 * If the delayed ack was going to go off
6654 * before the rtx/tlp/rack timer were going to
6655 * expire, then that would be the timer in control.
6656 * Note we don't check the time here, trusting the code is correct.
6662 * Ok the timer originally started is not what we want now.
6663 * We will force the hpts to be stopped if any, and restart
6664 * with the slot set to what was in the saved slot.
6666 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
6667 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), __LINE__, 0, 0, 0);
6671 rack_hpts_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
6672 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos,
6673 int32_t nxt_pkt, struct timeval *tv)
6675 int32_t thflags, retval, did_out = 0;
6676 int32_t way_out = 0;
6680 struct tcp_rack *rack;
6681 struct rack_sendmap *rsm;
6682 int32_t prev_state = 0;
6684 cts = tcp_tv_to_mssectick(tv);
6685 rack = (struct tcp_rack *)tp->t_fb_ptr;
6687 kern_prefetch(rack, &prev_state);
6689 thflags = th->th_flags;
6691 * If this is either a state-changing packet or current state isn't
6692 * established, we require a read lock on tcbinfo. Otherwise, we
6693 * allow the tcbinfo to be either locked or unlocked, as the
6694 * caller may have unnecessarily acquired a lock due to a race.
6696 INP_WLOCK_ASSERT(tp->t_inpcb);
6697 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
6699 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
6702 union tcp_log_stackspecific log;
6704 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
6705 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
6706 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
6707 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
6708 TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
6713 * Parse options on any incoming segment.
6715 tcp_dooptions(&to, (u_char *)(th + 1),
6716 (th->th_off << 2) - sizeof(struct tcphdr),
6717 (thflags & TH_SYN) ? TO_SYN : 0);
6720 * If timestamps were negotiated during SYN/ACK and a
6721 * segment without a timestamp is received, silently drop
6722 * the segment, unless it is a RST segment or missing timestamps are
6723 * tolerated.
6724 * See section 3.2 of RFC 7323.
6726 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) &&
6727 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) {
6730 goto done_with_input;
6734 * Segment received on connection. Reset idle time and keep-alive
6735 * timer. XXX: This should be done after segment validation to
6736 * ignore broken/spoofed segs.
6738 if (tp->t_idle_reduce && (tp->snd_max == tp->snd_una)) {
6739 if ((ticks - tp->t_rcvtime) >= tp->t_rxtcur) {
6740 counter_u64_add(rack_input_idle_reduces, 1);
6741 rack_cc_after_idle(tp,
6742 (rack->r_idle_reduce_largest ? 1 :0));
6745 rack->r_ctl.rc_rcvtime = cts;
6746 tp->t_rcvtime = ticks;
6749 * Unscale the window into a 32-bit value. For the SYN_SENT state
6750 * the scale is zero.
6752 tiwin = th->th_win << tp->snd_scale;
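/*
 * For example, a raw th_win of 1000 with snd_scale == 7 yields a
 * 128000 byte send window; in SYN_SENT snd_scale is still zero and
 * the field is used as-is.
 */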
6753 #ifdef NETFLIX_STATS
6754 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
6757 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move
6758 * this to occur after we've validated the segment.
6760 if (tp->t_flags & TF_ECN_PERMIT) {
6761 if (thflags & TH_CWR) {
6762 tp->t_flags &= ~TF_ECN_SND_ECE;
6763 tp->t_flags |= TF_ACKNOW;
6765 switch (iptos & IPTOS_ECN_MASK) {
6767 tp->t_flags |= TF_ECN_SND_ECE;
6768 TCPSTAT_INC(tcps_ecn_ce);
6770 case IPTOS_ECN_ECT0:
6771 TCPSTAT_INC(tcps_ecn_ect0);
6773 case IPTOS_ECN_ECT1:
6774 TCPSTAT_INC(tcps_ecn_ect1);
6778 /* Process a packet differently from RFC3168. */
6779 cc_ecnpkt_handler(tp, th, iptos);
6781 /* Congestion experienced. */
6782 if (thflags & TH_ECE) {
6783 rack_cong_signal(tp, th, CC_ECN);
6788 * If echoed timestamp is later than the current time, fall back to
6789 * non RFC1323 RTT calculation. Normalize timestamp if syncookies
6790 * were used when this connection was established.
6792 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
6793 to.to_tsecr -= tp->ts_offset;
6794 if (TSTMP_GT(to.to_tsecr, cts))
6795 to.to_tsecr = 0;
6798 * If it's the first time in, we need to take care of options and
6799 * verify we can do SACK for rack!
6801 if (rack->r_state == 0) {
6802 /* Should be init'd by rack_init() */
6803 KASSERT(rack->rc_inp != NULL,
6804 ("%s: rack->rc_inp unexpectedly NULL", __func__));
6805 if (rack->rc_inp == NULL) {
6806 rack->rc_inp = tp->t_inpcb;
6810 * Process options only when we get SYN/ACK back. The SYN
6811 * case for incoming connections is handled in tcp_syncache.
6812 * According to RFC1323 the window field in a SYN (i.e., a
6813 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX
6814 * this is traditional behavior, may need to be cleaned up.
6816 rack->r_cpu = inp_to_cpuid(tp->t_inpcb);
6817 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
6818 if ((to.to_flags & TOF_SCALE) &&
6819 (tp->t_flags & TF_REQ_SCALE)) {
6820 tp->t_flags |= TF_RCVD_SCALE;
6821 tp->snd_scale = to.to_wscale;
6823 tp->t_flags &= ~TF_REQ_SCALE;
6825 * Initial send window. It will be updated with the
6826 * next incoming segment to the scaled value.
6828 tp->snd_wnd = th->th_win;
6829 if ((to.to_flags & TOF_TS) &&
6830 (tp->t_flags & TF_REQ_TSTMP)) {
6831 tp->t_flags |= TF_RCVD_TSTMP;
6832 tp->ts_recent = to.to_tsval;
6833 tp->ts_recent_age = cts;
6835 tp->t_flags &= ~TF_REQ_TSTMP;
6836 if (to.to_flags & TOF_MSS)
6837 tcp_mss(tp, to.to_mss);
6838 if ((tp->t_flags & TF_SACK_PERMIT) &&
6839 (to.to_flags & TOF_SACKPERM) == 0)
6840 tp->t_flags &= ~TF_SACK_PERMIT;
6843 * At this point we are at the initial call. Here we decide
6844 * if we are doing RACK or not. We do this by seeing if
6845 * TF_SACK_PERMIT is set, if not rack is *not* possible and
6846 * we switch to the default code.
6848 if ((tp->t_flags & TF_SACK_PERMIT) == 0) {
6849 tcp_switch_back_to_default(tp);
6850 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen,
6855 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
6856 tcp_set_hpts(tp->t_inpcb);
6857 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack);
6860 * This is the one exception case where we set the rack state
6861 * always. All other times (timers etc) we must have a rack-state
6862 * set (so we assure we have done the checks above for SACK).
6864 if (rack->r_state != tp->t_state)
6865 rack_set_state(tp, rack);
6866 if (SEQ_GT(th->th_ack, tp->snd_una) && (rsm = TAILQ_FIRST(&rack->r_ctl.rc_map)) != NULL)
6867 kern_prefetch(rsm, &prev_state);
6868 prev_state = rack->r_state;
6869 rack->r_ctl.rc_tlp_send_cnt = 0;
6870 rack_clear_rate_sample(rack);
6871 retval = (*rack->r_substate) (m, th, so,
6872 tp, &to, drop_hdrlen,
6873 tlen, tiwin, thflags, nxt_pkt);
6875 if ((retval == 0) &&
6876 (tp->t_inpcb == NULL)) {
6877 panic("retval:%d tp:%p t_inpcb:NULL state:%d",
6878 retval, tp, prev_state);
6883 * If retval is 1 the tcb is unlocked and most likely the tp
6884 * is gone.
6886 INP_WLOCK_ASSERT(tp->t_inpcb);
6887 tcp_rack_xmit_timer_commit(rack, tp);
6889 if (rack->r_wanted_output != 0) {
6891 (void)tp->t_fb->tfb_tcp_output(tp);
6893 rack_start_hpts_timer(rack, tp, cts, __LINE__, 0, 0, 0);
6895 if (((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
6896 (SEQ_GT(tp->snd_max, tp->snd_una) ||
6897 (tp->t_flags & TF_DELACK) ||
6898 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
6899 (tp->t_state <= TCPS_CLOSING)))) {
6900 /* We could not send (probably in the hpts but stopped the timer earlier)? */
6901 if ((tp->snd_max == tp->snd_una) &&
6902 ((tp->t_flags & TF_DELACK) == 0) &&
6903 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
6904 /* keepalive not needed if we are still awaiting hptsi output */
6907 if (rack->rc_inp->inp_in_hpts)
6908 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
6909 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), __LINE__, 0, 0, 0);
6913 /* Do we have the correct timer running? */
6914 rack_timer_audit(tp, rack, &so->so_snd);
6918 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out);
6920 rack->r_wanted_output = 0;
6922 if (tp->t_inpcb == NULL) {
6923 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d",
6925 retval, tp, prev_state);
6928 INP_WUNLOCK(tp->t_inpcb);
6933 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
6934 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos)
6938 struct tcp_function_block *tfb;
6939 struct tcp_rack *rack;
6942 rack = (struct tcp_rack *)tp->t_fb_ptr;
6943 if (rack->r_state == 0) {
6945 * Initial input (ACK to SYN-ACK etc.); let's go ahead and get
6949 rack_hpts_do_segment(m, th, so, tp, drop_hdrlen,
6950 tlen, iptos, 0, &tv);
6953 tcp_queue_to_input(tp, m, th, tlen, drop_hdrlen, iptos);
6954 INP_WUNLOCK(tp->t_inpcb);
6957 rack_hpts_do_segment(m, th, so, tp, drop_hdrlen,
6958 tlen, iptos, 0, &tv);
6962 struct rack_sendmap *
6963 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused)
6965 struct rack_sendmap *rsm = NULL;
6967 uint32_t srtt_cur, srtt = 0, thresh = 0, ts_low = 0;
6969 /* Return the next guy to be re-transmitted */
6970 if (TAILQ_EMPTY(&rack->r_ctl.rc_map)) {
6973 if (tp->t_flags & TF_SENTFIN) {
6974 /* retran the end FIN? */
6977 /* OK, let's look at this one */
6978 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6979 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) {
6982 rsm = rack_find_lowest_rsm(rack);
6987 srtt_cur = tp->t_srtt >> TCP_RTT_SHIFT;
6988 srtt = TICKS_2_MSEC(srtt_cur);
6989 if (rack->rc_rack_rtt && (srtt > rack->rc_rack_rtt))
6990 srtt = rack->rc_rack_rtt;
6991 if (rsm->r_flags & RACK_ACKED) {
6994 if ((rsm->r_flags & RACK_SACK_PASSED) == 0) {
6995 /* It's not yet ready */
6998 idx = rsm->r_rtr_cnt - 1;
6999 ts_low = rsm->r_tim_lastsent[idx];
7000 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
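/*
 * Worked example (assuming the default reorder shift of 2): with
 * srtt == 40ms the threshold is roughly srtt + srtt/4 == 50ms, so a
 * segment last sent at ts_low == 1000 becomes eligible for a RACK
 * retransmit once tsused reaches 1050.
 */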
7001 if (tsused <= ts_low) {
7004 if ((tsused - ts_low) >= thresh) {
7011 rack_output(struct tcpcb *tp)
7014 uint32_t recwin, sendwin;
7016 int32_t len, flags, error = 0;
7019 uint32_t if_hw_tsomaxsegcount = 0;
7020 uint32_t if_hw_tsomaxsegsize = 0;
7021 long tot_len_this_send = 0;
7022 struct ip *ip = NULL;
7024 struct ipovly *ipov = NULL;
7026 #ifdef NETFLIX_TCP_O_UDP
7027 struct udphdr *udp = NULL;
7029 struct tcp_rack *rack;
7032 u_char opt[TCP_MAXOLEN];
7033 unsigned ipoptlen, optlen, hdrlen;
7034 #ifdef NETFLIX_TCP_O_UDP
7039 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
7040 unsigned ipsec_optlen = 0;
7043 int32_t idle, sendalot;
7044 int32_t sub_from_prr = 0;
7045 volatile int32_t sack_rxmit;
7046 struct rack_sendmap *rsm = NULL;
7047 int32_t tso, mtu, would_have_fin = 0;
7051 uint8_t hpts_calling, doing_tlp = 0;
7052 int32_t do_a_prefetch;
7053 int32_t prefetch_rsm = 0;
7054 int32_t prefetch_so_done = 0;
7055 struct tcp_log_buffer *lgb = NULL;
7059 struct ip6_hdr *ip6 = NULL;
7062 /* setup and take the cache hits here */
7063 rack = (struct tcp_rack *)tp->t_fb_ptr;
7065 so = inp->inp_socket;
7067 kern_prefetch(sb, &do_a_prefetch);
7070 INP_WLOCK_ASSERT(inp);
7072 if (tp->t_flags & TF_TOE)
7073 return (tcp_offload_output(tp));
7078 * For TFO connections in SYN_RECEIVED, only allow the initial
7079 * SYN|ACK and those sent by the retransmit timer.
7081 if ((tp->t_flags & TF_FASTOPEN) &&
7082 (tp->t_state == TCPS_SYN_RECEIVED) &&
7083 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */
7084 (tp->snd_nxt != tp->snd_una)) /* not a retransmit */
7088 if (rack->r_state) {
7089 /* Use the cache line loaded if possible */
7090 isipv6 = rack->r_is_v6;
7092 isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
7095 cts = tcp_ts_getticks();
7096 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
7099 * We are on the hpts for some timer but not hptsi output.
7100 * Remove from the hpts unconditionally.
7102 rack_timer_cancel(tp, rack, cts, __LINE__);
7104 /* Mark that we have called rack_output(). */
7105 if ((rack->r_timer_override) ||
7106 (tp->t_flags & TF_FORCEDATA) ||
7107 (tp->t_state < TCPS_ESTABLISHED)) {
7108 if (tp->t_inpcb->inp_in_hpts)
7109 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT);
7110 } else if (tp->t_inpcb->inp_in_hpts) {
7112 * On the hpts you can't pass; even if ACKNOW is on, we will
7113 * send when the hpts fires.
7115 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1);
7118 hpts_calling = inp->inp_hpts_calls;
7119 inp->inp_hpts_calls = 0;
7120 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
7121 if (rack_process_timers(tp, rack, cts, hpts_calling)) {
7122 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1);
7126 rack->r_wanted_output = 0;
7127 rack->r_timer_override = 0;
7129 * Determine length of data that should be transmitted, and flags
7130 * that will be used. If there is some data or critical controls
7131 * (SYN, RST) to send, then transmit; otherwise, investigate
7132 * further.
7134 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
7135 if (tp->t_idle_reduce) {
7136 if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur))
7137 rack_cc_after_idle(tp,
7138 (rack->r_idle_reduce_largest ? 1 :0));
7140 tp->t_flags &= ~TF_LASTIDLE;
7142 if (tp->t_flags & TF_MORETOCOME) {
7143 tp->t_flags |= TF_LASTIDLE;
7149 * If we've recently taken a timeout, snd_max will be greater than
7150 * snd_nxt. There may be SACK information that allows us to avoid
7151 * resending already delivered data. Adjust snd_nxt accordingly.
7154 cts = tcp_ts_getticks();
7157 sb_offset = tp->snd_max - tp->snd_una;
7158 sendwin = min(tp->snd_wnd, tp->snd_cwnd);
7160 flags = tcp_outflags[tp->t_state];
7162 * Send any SACK-generated retransmissions. If we're explicitly
7163 * trying to send out new data (when sendalot is 1), bypass this
7164 * function. If we retransmit in fast recovery mode, decrement
7165 * snd_cwnd, since we're replacing a (future) new transmission with
7166 * a retransmission now, and we previously incremented snd_cwnd in
7167 * tcp_input().
7170 * Still in sack recovery; reset rxmit flag to zero.
7172 while (rack->rc_free_cnt < rack_free_cache) {
7173 rsm = rack_alloc(rack);
7175 if (inp->inp_hpts_calls)
7178 goto just_return_nolock;
7180 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_next);
7181 rack->rc_free_cnt++;
7184 if (inp->inp_hpts_calls)
7185 inp->inp_hpts_calls = 0;
7189 if (flags & TH_RST) {
7193 if (rack->r_ctl.rc_tlpsend) {
7194 /* Tail loss probe */
7199 rsm = rack->r_ctl.rc_tlpsend;
7200 rack->r_ctl.rc_tlpsend = NULL;
7202 tlen = rsm->r_end - rsm->r_start;
7203 if (tlen > tp->t_maxseg)
7204 tlen = tp->t_maxseg;
7206 if (SEQ_GT(tp->snd_una, rsm->r_start)) {
7207 panic("tp:%p rack:%p snd_una:%u rsm:%p r_start:%u",
7208 tp, rack, tp->snd_una, rsm, rsm->r_start);
7211 sb_offset = rsm->r_start - tp->snd_una;
7212 cwin = min(tp->snd_wnd, tlen);
7214 } else if (rack->r_ctl.rc_resend) {
7215 /* Retransmit timer */
7216 rsm = rack->r_ctl.rc_resend;
7217 rack->r_ctl.rc_resend = NULL;
7218 len = rsm->r_end - rsm->r_start;
7221 sb_offset = rsm->r_start - tp->snd_una;
7222 if (len >= tp->t_maxseg) {
7225 KASSERT(sb_offset >= 0, ("%s: sack block to the left of una : %d",
7226 __func__, sb_offset));
7227 } else if ((rack->rc_in_persist == 0) &&
7228 ((rsm = tcp_rack_output(tp, rack, cts)) != NULL)) {
7231 if ((!IN_RECOVERY(tp->t_flags)) &&
7232 ((tp->t_flags & (TF_WASFRECOVERY | TF_WASCRECOVERY)) == 0)) {
7233 /* Enter recovery if not induced by a time-out */
7234 rack->r_ctl.rc_rsm_start = rsm->r_start;
7235 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
7236 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
7237 rack_cong_signal(tp, NULL, CC_NDUPACK);
7239 * When we enter recovery we need to assure we send
7240 * a segment.
7242 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
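/*
 * Seeding rc_prr_sndcnt with one MSS lets PRR (RFC 6937) release
 * at least one segment immediately on entering recovery; later
 * ACKs grow the budget in proportion to the data newly delivered.
 */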
7245 if (SEQ_LT(rsm->r_start, tp->snd_una)) {
7246 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
7247 tp, rack, rsm, rsm->r_start, tp->snd_una);
7250 tlen = rsm->r_end - rsm->r_start;
7251 sb_offset = rsm->r_start - tp->snd_una;
7252 if (tlen > rack->r_ctl.rc_prr_sndcnt) {
7253 len = rack->r_ctl.rc_prr_sndcnt;
7257 if (len >= tp->t_maxseg) {
7262 if ((rack->rc_timer_up == 0) &&
7265 * If it's not a timer, don't send a partial
7266 * segment.
7269 goto just_return_nolock;
7272 KASSERT(sb_offset >= 0, ("%s: sack block to the left of una : %d",
7273 __func__, sb_offset));
7277 TCPSTAT_INC(tcps_sack_rexmits);
7278 TCPSTAT_ADD(tcps_sack_rexmit_bytes,
7279 min(len, tp->t_maxseg));
7280 counter_u64_add(rack_rtm_prr_retran, 1);
7283 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) {
7284 /* we are retransmitting the fin */
7288 * When retransmitting data do *not* include the
7289 * FIN. This could happen from a TLP probe.
7296 rack->r_ctl.rc_rsm_at_retran = rsm;
7299 * Enforce a connection sendmap count limit if set
7300 * as long as we are not retransmitting.
7302 if ((rsm == NULL) &&
7303 (rack_map_entries_limit > 0) &&
7304 (rack->r_ctl.rc_num_maps_alloced >= rack_map_entries_limit)) {
7305 counter_u64_add(rack_to_alloc_limited, 1);
7306 if (!rack->alloc_limit_reported) {
7307 rack->alloc_limit_reported = 1;
7308 counter_u64_add(rack_alloc_limited_conns, 1);
7310 goto just_return_nolock;
7313 * Get standard flags, and add SYN or FIN if requested by 'hidden'
7314 * state flags.
7316 if (tp->t_flags & TF_NEEDFIN)
7318 if (tp->t_flags & TF_NEEDSYN)
7320 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) {
7322 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
7324 kern_prefetch(end_rsm, &prefetch_rsm);
7329 * If in persist timeout with window of 0, send 1 byte. Otherwise,
7330 * if window is small but nonzero and the timer has expired, we
7331 * will send what we can and go to transmit state.
7333 if (tp->t_flags & TF_FORCEDATA) {
7336 * If we still have some data to send, then clear
7337 * the FIN bit. Usually this would happen below
7338 * when it realizes that we aren't sending all the
7339 * data. However, if we have exactly 1 byte of
7340 * unsent data, then it won't clear the FIN bit
7341 * below, and if we are in persist state, we wind up
7342 * sending the packet without recording that we sent
7343 * the FIN bit.
7345 * We can't just blindly clear the FIN bit, because
7346 * if we don't have any more data to send then the
7347 * probe will be the FIN itself.
7349 if (sb_offset < sbused(sb))
7353 if (rack->rc_in_persist)
7354 rack_exit_persist(tp, rack);
7356 * If we are dropping persist mode then we need to
7357 * correct snd_nxt/snd_max and off.
7359 tp->snd_nxt = tp->snd_max;
7360 sb_offset = tp->snd_nxt - tp->snd_una;
7364 * If snd_nxt == snd_max and we have transmitted a FIN, the
7365 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a
7366 * negative length. This can also occur when TCP opens up its
7367 * congestion window while receiving additional duplicate acks after
7368 * fast-retransmit because TCP will reset snd_nxt to snd_max after
7369 * the fast-retransmit.
7371 * In the normal retransmit-FIN-only case, however, snd_nxt will be
7372 * set to snd_una, the sb_offset will be 0, and the length may wind
7373 * up 0.
7375 * If sack_rxmit is true we are retransmitting from the scoreboard
7376 * in which case len is already set.
7378 if (sack_rxmit == 0) {
7381 avail = sbavail(sb);
7382 if (SEQ_GT(tp->snd_nxt, tp->snd_una))
7383 sb_offset = tp->snd_nxt - tp->snd_una;
7386 if (IN_FASTRECOVERY(tp->t_flags) == 0) {
7387 if (rack->r_ctl.rc_tlp_new_data) {
7388 /* TLP is forcing out new data */
7389 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) {
7390 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset);
7392 if (rack->r_ctl.rc_tlp_new_data > tp->snd_wnd)
7395 len = rack->r_ctl.rc_tlp_new_data;
7396 rack->r_ctl.rc_tlp_new_data = 0;
7399 if (sendwin > avail) {
7400 /* use the available */
7401 if (avail > sb_offset) {
7402 len = (int32_t)(avail - sb_offset);
7407 if (sendwin > sb_offset) {
7408 len = (int32_t)(sendwin - sb_offset);
7415 uint32_t outstanding;
7418 * We are inside of a SACK recovery episode and are
7419 * sending new data, having retransmitted all the
7420 * data possible so far in the scoreboard.
7422 outstanding = tp->snd_max - tp->snd_una;
7423 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) {
7424 if (tp->snd_wnd > outstanding) {
7425 len = tp->snd_wnd - outstanding;
7426 /* Check to see if we have the data */
7427 if (((sb_offset + len) > avail) &&
7428 (avail > sb_offset))
7429 len = avail - sb_offset;
7434 } else if (avail > sb_offset)
7435 len = avail - sb_offset;
7439 if (len > rack->r_ctl.rc_prr_sndcnt)
7440 len = rack->r_ctl.rc_prr_sndcnt;
7444 counter_u64_add(rack_rtm_prr_newdata, 1);
7447 if (len > tp->t_maxseg) {
7449 * We should never send more than one MSS when
7450 * retransmitting or sending new data in prr
7451 * mode unless the override flag is on. Most
7452 * likely the PRR algorithm is not going to
7453 * let us send a lot as well :-)
7455 if (rack->r_ctl.rc_prr_sendalot == 0)
7457 } else if (len < tp->t_maxseg) {
7459 * Do we send any? The idea here is if the
7460 * send empties the socket buffer we want to
7461 * do it. However, if not, then let's just wait
7462 * for our prr_sndcnt to get bigger.
7466 leftinsb = sbavail(sb) - sb_offset;
7467 if (leftinsb > len) {
7468 /* This send does not empty the sb */
7474 if (prefetch_so_done == 0) {
7475 kern_prefetch(so, &prefetch_so_done);
7476 prefetch_so_done = 1;
7479 * Lop off SYN bit if it has already been sent. However, if this is
7480 * SYN-SENT state and if segment contains data and if we don't know
7481 * that foreign host supports TAO, suppress sending segment.
7483 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
7484 if ((tp->t_state != TCPS_SYN_RECEIVED) &&
7485 (tp->t_state != TCPS_SYN_SENT))
7489 * When sending additional segments following a TFO SYN|ACK,
7490 * do not include the SYN bit.
7492 if ((tp->t_flags & TF_FASTOPEN) &&
7493 (tp->t_state == TCPS_SYN_RECEIVED))
7498 * Be careful not to send data and/or FIN on SYN segments. This
7499 * measure is needed to prevent interoperability problems with not
7500 * fully conformant TCP implementations.
7502 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
7508 * When retransmitting SYN|ACK on a passively-created TFO socket,
7509 * don't include data, as the presence of data may have caused the
7510 * original SYN|ACK to have been dropped by a middlebox.
7512 if ((tp->t_flags & TF_FASTOPEN) &&
7513 ((tp->t_state == TCPS_SYN_RECEIVED) && (tp->t_rxtshift > 0)))
7518 * If FIN has been sent but not acked, but we haven't been
7519 * called to retransmit, len will be < 0. Otherwise, window
7520 * shrank after we sent into it. If window shrank to 0,
7521 * cancel pending retransmit, pull snd_nxt back to (closed)
7522 * window, and set the persist timer if it isn't already
7523 * going. If the window didn't close completely, just wait
7524 * for an ACK.
7526 * We also do a general check here to ensure that we will
7527 * set the persist timer when we have data to send, but a
7528 * 0-byte window. This makes sure the persist timer is set
7529 * even if the packet hits one of the "goto send" lines
7530 * below.
7533 if ((tp->snd_wnd == 0) &&
7534 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
7535 (sb_offset < (int)sbavail(sb))) {
7536 tp->snd_nxt = tp->snd_una;
7537 rack_enter_persist(tp, rack, cts);
7540 /* len will be >= 0 after this point. */
7541 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
7542 tcp_sndbuf_autoscale(tp, so, sendwin);
7544 * Decide if we can use TCP Segmentation Offloading (if supported by
7545 * hardware).
7547 * TSO may only be used if we are in a pure bulk sending state. The
7548 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
7549 * options prevent using TSO. With TSO the TCP header is the same
7550 * (except for the sequence number) for all generated packets. This
7551 * makes it impossible to transmit any options which vary per
7552 * generated segment or packet.
7554 * IPv4 handling has a clear separation of ip options and ip header
7555 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
7556 * the right thing below to provide length of just ip options and thus
7557 * checking for ipoptlen is enough to decide if ip options are present.
7562 ipoptlen = ip6_optlen(tp->t_inpcb);
7565 if (tp->t_inpcb->inp_options)
7566 ipoptlen = tp->t_inpcb->inp_options->m_len -
7567 offsetof(struct ipoption, ipopt_list);
7570 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
7572 * Pre-calculate here as we save another lookup into the darknesses
7573 * of IPsec that way and can actually decide if TSO is ok.
7576 if (isipv6 && IPSEC_ENABLED(ipv6))
7577 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb);
7583 if (IPSEC_ENABLED(ipv4))
7584 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb);
7588 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
7589 ipoptlen += ipsec_optlen;
7591 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > tp->t_maxseg &&
7592 #ifdef NETFLIX_TCP_O_UDP
7593 (tp->t_port == 0) &&
7595 ((tp->t_flags & TF_SIGNATURE) == 0) &&
7596 tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
7600 uint32_t outstanding;
7602 outstanding = tp->snd_max - tp->snd_una;
7603 if (tp->t_flags & TF_SENTFIN) {
7605 * If we sent a fin, snd_max is 1 higher than
7606 * snd_una.
7610 if (outstanding > 0) {
7612 * This is sub-optimal. We only send a standalone
7613 * FIN on its own segment.
7615 if (flags & TH_FIN) {
7619 } else if (sack_rxmit) {
7620 if ((rsm->r_flags & RACK_HAS_FIN) == 0)
7623 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
7628 recwin = sbspace(&so->so_rcv);
7631 * Sender silly window avoidance. We transmit under the following
7632 * conditions when len is non-zero:
7634 * - We have a full segment (or more with TSO) - This is the last
7635 * buffer in a write()/send() and we are either idle or running
7636 * NODELAY - we've timed out (e.g. persist timer) - we have more
7637 * than 1/2 the maximum send window's worth of data (receiver may be
7638 * limiting the window size) - we need to retransmit
7641 if (len >= tp->t_maxseg) {
7646 * NOTE! on localhost connections an 'ack' from the remote
7647 * end may occur synchronously with the output and cause us
7648 * to flush a buffer queued with moretocome. XXX
7651 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */
7652 (idle || (tp->t_flags & TF_NODELAY)) &&
7653 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(&so->so_snd)) &&
7654 (tp->t_flags & TF_NOPUSH) == 0) {
7658 if (tp->t_flags & TF_FORCEDATA) { /* typ. timeout case */
7662 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */
7665 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
7669 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */
7679 * Sending of standalone window updates.
7681 * Window updates are important when we close our window due to a
7682 * full socket buffer and are opening it again after the application
7683 * reads data from it. Once the window has opened again and the
7684 * remote end starts to send again the ACK clock takes over and
7685 * provides the most current window information.
7687 * We must avoid the silly window syndrome whereby every read from
7688 * the receive buffer, no matter how small, causes a window update
7689 * to be sent. We also should avoid sending a flurry of window
7690 * updates when the socket buffer had queued a lot of data and the
7691 * application is doing small reads.
7693 * Prevent a flurry of pointless window updates by only sending an
7694 * update when we can increase the advertised window by more than
7695 * 1/4th of the socket buffer capacity. When the buffer is getting
7696 * full or is very small be more aggressive and send an update
7697 * whenever we can increase by two mss sized segments. In all other
7698 * situations the ACK's to new incoming data will carry further
7701 * Don't send an independent window update if a delayed ACK is
7702 * pending (it will get piggy-backed on it) or the remote side
7703 * already has done a half-close and won't send more data. Skip
7704 * this if the connection is in T/TCP half-open state.
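/*
 * For example, with a 64KB receive buffer an update is sent once
 * the window can open by 16KB (a quarter of the buffer); if the
 * buffer is nearly full (recwin <= hiwat/8) or smaller than
 * 8 * t_maxseg, opening by two segments is enough.
 */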
7706 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
7707 !(tp->t_flags & TF_DELACK) &&
7708 !TCPS_HAVERCVDFIN(tp->t_state)) {
7710 * "adv" is the amount we could increase the window, taking
7711 * into account that we are limited by TCP_MAXWIN <<
7712 * tp->rcv_scale.
7717 adv = min(recwin, (long)TCP_MAXWIN << tp->rcv_scale);
7718 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
7719 oldwin = (tp->rcv_adv - tp->rcv_nxt);
7725 * If the new window size ends up being the same as the old
7726 * size when it is scaled, then don't force a window update.
7728 if (oldwin >> tp->rcv_scale == (adv + oldwin) >> tp->rcv_scale)
7731 if (adv >= (int32_t)(2 * tp->t_maxseg) &&
7732 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
7733 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) ||
7734 so->so_rcv.sb_hiwat <= 8 * tp->t_maxseg)) {
7738 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat)
7744 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW
7745 * is also a catch-all for the retransmit timer timeout case.
7747 if (tp->t_flags & TF_ACKNOW) {
7751 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) {
7755 if (SEQ_GT(tp->snd_up, tp->snd_una)) {
7760 * If our state indicates that FIN should be sent and we have not
7761 * yet done so, then we need to send.
7763 if (flags & TH_FIN) {
7764 if ((tp->t_flags & TF_SENTFIN) ||
7765 (((tp->t_flags & TF_SENTFIN) == 0) &&
7766 (tp->snd_nxt == tp->snd_una))) {
7772 * No reason to send a segment, just return.
7777 if (tot_len_this_send == 0)
7778 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1);
7779 rack_start_hpts_timer(rack, tp, cts, __LINE__, slot, tot_len_this_send, 1);
7780 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling);
7781 tp->t_flags &= ~TF_FORCEDATA;
7785 if (doing_tlp == 0) {
7787 * Data not a TLP, and it's not the rxt firing. If it is the
7788 * rxt firing, we want to leave the tlp_in_progress flag on
7789 * so we don't send another TLP. It has to be a rack timer
7790 * or normal send (response to acked data) to clear the tlp
7791 * in_progress flag.
7793 rack->rc_tlp_in_progress = 0;
7795 SOCKBUF_LOCK_ASSERT(sb);
7797 if (len >= tp->t_maxseg)
7798 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
7800 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
7803 * Before ESTABLISHED, force sending of initial options unless TCP
7804 * set not to do any options. NOTE: we assume that the IP/TCP header
7805 * plus TCP options always fit in a single mbuf, leaving room for a
7806 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr)
7807 * + optlen <= MCLBYTES
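/*
 * Rough numbers for a common configuration: a max_linkhdr of 16, a
 * 40 byte tcpiphdr and at most 40 bytes of TCP options total well
 * under a 2KB (MCLBYTES) cluster, so the headers always fit in a
 * single mbuf cluster.
 */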
7812 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
7815 hdrlen = sizeof(struct tcpiphdr);
7818 * Compute options for segment. We only have to care about SYN and
7819 * established connection segments. Options for SYN-ACK segments
7820 * are handled in TCP syncache.
7823 if ((tp->t_flags & TF_NOOPT) == 0) {
7824 /* Maximum segment size. */
7825 if (flags & TH_SYN) {
7826 tp->snd_nxt = tp->iss;
7827 to.to_mss = tcp_mssopt(&inp->inp_inc);
7828 #ifdef NETFLIX_TCP_O_UDP
7830 to.to_mss -= V_tcp_udp_tunneling_overhead;
7832 to.to_flags |= TOF_MSS;
7835 * Only include the TFO option on the first
7836 * transmission of the SYN|ACK on a
7837 * passively-created TFO socket, as the presence of
7838 * the TFO option may have caused the original
7839 * SYN|ACK to have been dropped by a middlebox.
7841 if ((tp->t_flags & TF_FASTOPEN) &&
7842 (tp->t_state == TCPS_SYN_RECEIVED) &&
7843 (tp->t_rxtshift == 0)) {
7844 to.to_tfo_len = TCP_FASTOPEN_MAX_COOKIE_LEN;
7845 to.to_tfo_cookie = (u_char *)&tp->t_tfo_cookie;
7846 to.to_flags |= TOF_FASTOPEN;
7850 /* Window scaling. */
7851 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
7852 to.to_wscale = tp->request_r_scale;
7853 to.to_flags |= TOF_SCALE;
7856 if ((tp->t_flags & TF_RCVD_TSTMP) ||
7857 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
7858 to.to_tsval = cts + tp->ts_offset;
7859 to.to_tsecr = tp->ts_recent;
7860 to.to_flags |= TOF_TS;
7862 /* Set receive buffer autosizing timestamp. */
7863 if (tp->rfbuf_ts == 0 &&
7864 (so->so_rcv.sb_flags & SB_AUTOSIZE))
7865 tp->rfbuf_ts = tcp_ts_getticks();
7866 /* Selective ACK's. */
7868 to.to_flags |= TOF_SACKPERM;
7869 else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
7870 tp->rcv_numsacks > 0) {
7871 to.to_flags |= TOF_SACK;
7872 to.to_nsacks = tp->rcv_numsacks;
7873 to.to_sacks = (u_char *)tp->sackblks;
7875 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
7876 /* TCP-MD5 (RFC2385). */
7877 if (tp->t_flags & TF_SIGNATURE)
7878 to.to_flags |= TOF_SIGNATURE;
7879 #endif /* TCP_SIGNATURE */
7881 /* Processing the options. */
7882 hdrlen += optlen = tcp_addoptions(&to, opt);
7884 #ifdef NETFLIX_TCP_O_UDP
7886 if (V_tcp_udp_tunneling_port == 0) {
7887 /* The port was removed?? */
7888 SOCKBUF_UNLOCK(&so->so_snd);
7889 return (EHOSTUNREACH);
7891 hdrlen += sizeof(struct udphdr);
7896 ipoptlen = ip6_optlen(tp->t_inpcb);
7899 if (tp->t_inpcb->inp_options)
7900 ipoptlen = tp->t_inpcb->inp_options->m_len -
7901 offsetof(struct ipoption, ipopt_list);
7904 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
7905 ipoptlen += ipsec_optlen;
7909 * Adjust data length if insertion of options will bump the packet
7910 * length beyond the t_maxseg length. Clear the FIN bit because we
7911 * cut off the tail of the segment.
7913 if (len + optlen + ipoptlen > tp->t_maxseg) {
7914 if (flags & TH_FIN) {
7919 uint32_t if_hw_tsomax;
7923 /* extract TSO information */
7924 if_hw_tsomax = tp->t_tsomax;
7925 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
7926 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
7927 KASSERT(ipoptlen == 0,
7928 ("%s: TSO can't do IP options", __func__));
7931 * Check if we should limit by maximum payload
7932 * length:
7934 if (if_hw_tsomax != 0) {
7935 /* compute maximum TSO length */
7936 max_len = (if_hw_tsomax - hdrlen -
7940 } else if (len > max_len) {
7946 * Prevent the last segment from being fractional
7947 * unless the send sockbuf can be emptied:
7949 max_len = (tp->t_maxseg - optlen);
7950 if ((sb_offset + len) < sbavail(sb)) {
7951 moff = len % (u_int)max_len;
7958 * In case there are too many small fragments don't
7959 * use TSO:
7961 if (len <= max_len) {
7967 * Send the FIN in a separate segment after the bulk
7968 * sending is done. We don't trust the TSO
7969 * implementations to clear the FIN flag on all but
7970 * the last segment.
7972 if (tp->t_flags & TF_NEEDFIN)
7976 if (optlen + ipoptlen >= tp->t_maxseg) {
7978 * Since we don't have enough space to put
7979 * the IP header chain and the TCP header in
7980 * one packet as required by RFC 7112, don't
7981 * send it. Also ensure that at least one
7982 * byte of the payload can be put into the
7983 * TCP segment.
7985 SOCKBUF_UNLOCK(&so->so_snd);
7990 len = tp->t_maxseg - optlen - ipoptlen;
7995 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
7996 ("%s: len > IP_MAXPACKET", __func__));
7999 if (max_linkhdr + hdrlen > MCLBYTES)
8001 if (max_linkhdr + hdrlen > MHLEN)
8003 panic("tcphdr too big");
8007 * This KASSERT is here to catch edge cases at a well defined place.
8008 * Before, those had triggered (random) panic conditions further
8009 * down.
8011 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
8016 * We have outstanding data; don't send a FIN by itself!
8021 * Grab a header mbuf, attaching a copy of data to be transmitted,
8022 * and initialize the header from the template for sends on this
8029 if (rack->rc_pace_max_segs)
8030 max_val = rack->rc_pace_max_segs * tp->t_maxseg;
8034 * We allow a limit on sending with hptsi.
8036 if (len > max_val) {
8040 if (MHLEN < hdrlen + max_linkhdr)
8041 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
8044 m = m_gethdr(M_NOWAIT, MT_DATA);
8052 m->m_data += max_linkhdr;
8056 * Start the m_copy functions from the closest mbuf to the
8057 * sb_offset in the socket buffer chain.
8059 mb = sbsndptr_noadv(sb, sb_offset, &moff);
8060 if (len <= MHLEN - hdrlen - max_linkhdr) {
8061 m_copydata(mb, moff, (int)len,
8062 mtod(m, caddr_t)+hdrlen);
8063 if (SEQ_LT(tp->snd_nxt, tp->snd_max))
8064 sbsndptr_adv(sb, mb, len);
8067 struct sockbuf *msb;
8069 if (SEQ_LT(tp->snd_nxt, tp->snd_max))
8073 m->m_next = tcp_m_copym(/*tp, */ mb, moff, &len,
8074 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb /*, 0, NULL*/);
8075 if (len <= (tp->t_maxseg - optlen)) {
8077 * Must have run out of mbufs for the copy;
8078 * shorten it so we no longer need TSO. Let's
8079 * not set sendalot since we are low on
8080 * mbufs.
8084 if (m->m_next == NULL) {
	if ((tp->t_flags & TF_FORCEDATA) && len == 1) {
		TCPSTAT_INC(tcps_sndprobe);
#ifdef NETFLIX_STATS
		if (SEQ_LT(tp->snd_nxt, tp->snd_max))
			stats_voi_update_abs_u32(tp->t_stats,
			    VOI_TCP_RETXPB, len);
		else
			stats_voi_update_abs_u64(tp->t_stats,
			    VOI_TCP_TXPB, len);
#endif
	} else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
		if (rsm && (rsm->r_flags & RACK_TLP)) {
			/*
			 * TLP should not count in retran count, but
			 * in its own bin.
			 */
/*			tp->t_sndtlppack++;*/
/*			tp->t_sndtlpbyte += len;*/
			counter_u64_add(rack_tlp_retran, 1);
			counter_u64_add(rack_tlp_retran_bytes, len);
		} else {
			tp->t_sndrexmitpack++;
			TCPSTAT_INC(tcps_sndrexmitpack);
			TCPSTAT_ADD(tcps_sndrexmitbyte, len);
		}
#ifdef NETFLIX_STATS
		stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
		    len);
#endif
	} else {
		TCPSTAT_INC(tcps_sndpack);
		TCPSTAT_ADD(tcps_sndbyte, len);
#ifdef NETFLIX_STATS
		stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
		    len);
#endif
	}
	/*
	 * If we're sending everything we've got, set PUSH. (This
	 * will keep happy those implementations which only give
	 * data to the user when a buffer fills or a PUSH comes in.)
	 */
	if (sb_offset + len == sbused(sb) &&
	    sbused(sb) &&
	    !(flags & TH_SYN))
		flags |= TH_PUSH;
	/*
	 * Are we doing hptsi, if so we must calculate the slot. We
	 * only do hptsi in ESTABLISHED and with no RESET being
	 * sent where we have data to send.
	 */
	if (((tp->t_state == TCPS_ESTABLISHED) ||
	    (tp->t_state == TCPS_CLOSE_WAIT) ||
	    ((tp->t_state == TCPS_FIN_WAIT_1) &&
	    ((tp->t_flags & TF_SENTFIN) == 0) &&
	    ((flags & TH_FIN) == 0))) &&
	    ((flags & TH_RST) == 0) &&
	    (rack->rc_always_pace)) {
		/*
		 * We use the most optimistic possible cwnd/srtt for
		 * sending calculations. This will make our
		 * calculation anticipate getting more through
		 * quicker than possible. But that's ok; we don't want
		 * the peer to have a gap in data sending.
		 */
		uint32_t srtt, cwnd, tr_perms = 0;

		if (rack->r_ctl.rc_rack_min_rtt)
			srtt = rack->r_ctl.rc_rack_min_rtt;
		else
			srtt = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT));
		if (rack->r_ctl.rc_rack_largest_cwnd)
			cwnd = rack->r_ctl.rc_rack_largest_cwnd;
		else
			cwnd = tp->snd_cwnd;
		tr_perms = cwnd / srtt;
		if (tr_perms == 0) {
			tr_perms = tp->t_maxseg;
		}
		tot_len_this_send += len;
		/*
		 * Calculate how long this will take to drain. If the
		 * calculation comes out to zero, that's ok; we will use
		 * send_a_lot to possibly spin around for more, increasing
		 * tot_len_this_send to the point that it's going to require
		 * a pace, or we hit the cwnd. In that case we are just
		 * waiting for an ACK.
		 */
		slot = tot_len_this_send / tr_perms;
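		/*
		 * Illustrative numbers (assumed, not from the source): with
		 * cwnd = 20 * 1460 = 29200 bytes and srtt = 20 ms, tr_perms
		 * is 29200 / 20 = 1460 bytes per millisecond; a 14600 byte
		 * send then yields slot = 14600 / 1460 = 10 ms of pacing
		 * delay before the next transmission.
		 */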
		/* Now do we reduce the time so we don't run dry? */
		if (slot && rack->rc_pace_reduce) {
			int32_t reduce;

			reduce = (slot / rack->rc_pace_reduce);
			if (reduce < slot) {
				slot -= reduce;
			} else
				slot = 0;
		}
		if (rack->r_enforce_min_pace &&
		    (slot == 0) &&
		    (tot_len_this_send >= (rack->r_min_pace_seg_thresh * tp->t_maxseg))) {
			/* We are enforcing a minimum pace time of 1ms */
			slot = rack->r_enforce_min_pace;
		}
	}
	SOCKBUF_UNLOCK(sb);
	} else {
		SOCKBUF_UNLOCK(sb);
		if (tp->t_flags & TF_ACKNOW)
			TCPSTAT_INC(tcps_sndacks);
		else if (flags & (TH_SYN | TH_FIN | TH_RST))
			TCPSTAT_INC(tcps_sndctrl);
		else if (SEQ_GT(tp->snd_up, tp->snd_una))
			TCPSTAT_INC(tcps_sndurg);
		else
			TCPSTAT_INC(tcps_sndwinup);

		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (m == NULL) {
			error = ENOBUFS;
			sack_rxmit = 0;
			goto out;
		}
#ifdef INET6
		if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
		    MHLEN >= hdrlen) {
			M_ALIGN(m, hdrlen);
		} else
#endif
			m->m_data += max_linkhdr;
	}
	SOCKBUF_UNLOCK_ASSERT(sb);
	m->m_pkthdr.rcvif = (struct ifnet *)0;
#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif
#ifdef INET6
	if (isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
#ifdef NETFLIX_TCP_O_UDP
		if (tp->t_port) {
			udp = (struct udphdr *)((caddr_t)ip6 + ipoptlen + sizeof(struct ip6_hdr));
			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
			udp->uh_dport = tp->t_port;
			ulen = hdrlen + len - sizeof(struct ip6_hdr);
			udp->uh_ulen = htons(ulen);
			th = (struct tcphdr *)(udp + 1);
		} else
#endif
			th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, /*tp->t_port, */ ip6, th);
	} else
#endif				/* INET6 */
	{
		ip = mtod(m, struct ip *);
#ifdef TCPDEBUG
		ipov = (struct ipovly *)ip;
#endif
#ifdef NETFLIX_TCP_O_UDP
		if (tp->t_port) {
			udp = (struct udphdr *)((caddr_t)ip + ipoptlen + sizeof(struct ip));
			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
			udp->uh_dport = tp->t_port;
			ulen = hdrlen + len - sizeof(struct ip);
			udp->uh_ulen = htons(ulen);
			th = (struct tcphdr *)(udp + 1);
		} else
#endif
			th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, /*tp->t_port, */ ip, th);
	}
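	/*
	 * Sketch of the NETFLIX_TCP_O_UDP encapsulation built above
	 * (illustrative): when tp->t_port is set the wire format is
	 *
	 *	[IP or IPv6 header][UDP header][TCP header + options][data]
	 *
	 * so uh_ulen covers everything after the IP header, which is why
	 * ulen = hdrlen + len - sizeof(ip header) in both branches.
	 */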
	/*
	 * Fill in fields, remembering maximum advertised window for use in
	 * delaying messages about window sizes. If resending a FIN, be sure
	 * not to use a new sequence number.
	 */
	if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
	    tp->snd_nxt == tp->snd_max)
		tp->snd_nxt--;
	/*
	 * If we are starting a connection, send ECN setup SYN packet. If we
	 * are on a retransmit, we may resend those bits a number of times
	 * as per RFC 3168.
	 */
	if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) {
		if (tp->t_rxtshift >= 1) {
			if (tp->t_rxtshift <= V_tcp_ecn_maxretries)
				flags |= TH_ECE | TH_CWR;
		} else
			flags |= TH_ECE | TH_CWR;
	}
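	/*
	 * Example of the retransmit rule above (illustrative): with
	 * V_tcp_ecn_maxretries = 1, the ECN-setup SYN <ECE,CWR> is sent on
	 * the initial transmission (t_rxtshift == 0) and again on the first
	 * retransmit; later SYN retransmits fall back to a non-ECN
	 * handshake.
	 */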
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (tp->t_flags & TF_ECN_PERMIT)) {
		/*
		 * If the peer has ECN, mark data packets with ECN capable
		 * transmission (ECT). Ignore pure ack packets,
		 * retransmissions and window probes.
		 */
		if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) &&
		    (sack_rxmit == 0) &&
		    !((tp->t_flags & TF_FORCEDATA) && len == 1)) {
#ifdef INET6
			if (isipv6)
				ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
			else
#endif
				ip->ip_tos |= IPTOS_ECN_ECT0;
			TCPSTAT_INC(tcps_ecn_ect0);
		}
		/*
		 * Reply with proper ECN notifications.
		 * Only set CWR on new data segments.
		 */
		if (tp->t_flags & TF_ECN_SND_CWR) {
			flags |= TH_CWR;
			tp->t_flags &= ~TF_ECN_SND_CWR;
		}
		if (tp->t_flags & TF_ECN_SND_ECE)
			flags |= TH_ECE;
	}
	/*
	 * If we are doing retransmissions, then snd_nxt will not reflect
	 * the first unsent octet. For ACK only packets, we do not want the
	 * sequence number of the retransmitted packet, we want the sequence
	 * number of the next unsent octet. So, if there is no data (and no
	 * SYN or FIN), use snd_max instead of snd_nxt when filling in
	 * ti_seq. But if we are in persist state, snd_max might reflect
	 * one byte beyond the right edge of the window, so use snd_nxt in
	 * that case, since we know we aren't doing a retransmission.
	 * (retransmit and persist are mutually exclusive...)
	 */
	if (sack_rxmit == 0) {
		if (len || (flags & (TH_SYN | TH_FIN)) ||
		    rack->rc_in_persist) {
			th->th_seq = htonl(tp->snd_nxt);
			rack_seq = tp->snd_nxt;
		} else if (flags & TH_RST) {
			/*
			 * For a Reset send the last cum ack in sequence
			 * (this like any other choice may still generate a
			 * challenge ack, if an ack-update packet is in
			 * flight).
			 */
			th->th_seq = htonl(tp->snd_una);
			rack_seq = tp->snd_una;
		} else {
			th->th_seq = htonl(tp->snd_max);
			rack_seq = tp->snd_max;
		}
	} else {
		th->th_seq = htonl(rsm->r_start);
		rack_seq = rsm->r_start;
	}
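	/*
	 * Summary of the sequence-number choice above (paraphrased):
	 *
	 *	SACK retransmit        -> rsm->r_start
	 *	data, SYN/FIN, persist -> snd_nxt
	 *	pure RST               -> snd_una (last cumulative ack)
	 *	pure ACK               -> snd_max (next unsent octet)
	 */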
	th->th_ack = htonl(tp->rcv_nxt);
	if (optlen) {
		bcopy(opt, th + 1, optlen);
		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	}
	th->th_flags = flags;
	/*
	 * Calculate receive window. Don't shrink window, but avoid silly
	 * window syndrome.
	 */
	if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
	    recwin < (long)tp->t_maxseg)
		recwin = 0;
	if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
	    recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
		recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
	if (recwin > (long)TCP_MAXWIN << tp->rcv_scale)
		recwin = (long)TCP_MAXWIN << tp->rcv_scale;
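	/*
	 * Worked example (values assumed): with sb_hiwat = 65536 and
	 * t_maxseg = 1460, a computed recwin of 1200 bytes is below both
	 * 65536 / 4 and one segment, so it is rounded down to 0 to avoid
	 * silly window syndrome; with rcv_scale = 3 the upper clamp is
	 * 65535 << 3 = 524280 bytes.
	 */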
	/*
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or
	 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is
	 * handled in syncache.
	 */
	if (flags & TH_SYN)
		th->th_win = htons((u_short)
		    (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
	else {
		/* Avoid shrinking window with window scaling. */
		recwin = roundup2(recwin, 1 << tp->rcv_scale);
		th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
	}
	/*
	 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0
	 * window. This may cause the remote transmitter to stall. This
	 * flag tells soreceive() to disable delayed acknowledgements when
	 * draining the buffer. This can occur if the receiver is
	 * attempting to read more data than can be buffered prior to
	 * transmitting on the connection.
	 */
	if (th->th_win == 0) {
		tp->t_sndzerowin++;
		tp->t_flags |= TF_RXWIN0SENT;
	} else
		tp->t_flags &= ~TF_RXWIN0SENT;
	if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
		th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
		th->th_flags |= TH_URG;
	} else
		/*
		 * If no urgent pointer to send, then we pull the urgent
		 * pointer to the left edge of the send window so that it
		 * doesn't drift into the send window on sequence number
		 * wraparound.
		 */
		tp->snd_up = tp->snd_una;	/* drag it along */
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if (to.to_flags & TOF_SIGNATURE) {
		/*
		 * Calculate MD5 signature and put it into the place
		 * determined before.
		 * NOTE: since TCP options buffer doesn't point into
		 * mbuf's data, calculate offset and use it.
		 */
		if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
		    (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
			/*
			 * Do not send segment if the calculation of MD5
			 * digest has failed.
			 */
			goto out;
		}
	}
#endif
	/*
	 * Put TCP length in extended header, and then checksum extended
	 * header and data.
	 */
	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
#ifdef INET6
	if (isipv6) {
		/*
		 * ip6_plen need not be filled now; it will be filled in
		 * ip6_output.
		 */
#ifdef NETFLIX_TCP_O_UDP
		if (tp->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
			th->th_sum = htons(0);
			UDPSTAT_INC(udps_opackets);
		} else {
#endif
			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in6_cksum_pseudo(ip6,
			    sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
			    0);
#ifdef NETFLIX_TCP_O_UDP
		}
#endif
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
#ifdef NETFLIX_TCP_O_UDP
		if (tp->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
			th->th_sum = htons(0);
			UDPSTAT_INC(udps_opackets);
		} else {
#endif
			m->m_pkthdr.csum_flags = CSUM_TCP;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
			    IPPROTO_TCP + len + optlen));
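		/*
		 * Note on the in_pseudo() call above: the IPv4 pseudo
		 * header folds source/destination addresses together with
		 * protocol and TCP length, so htons(sizeof(struct tcphdr) +
		 * IPPROTO_TCP + len + optlen) seeds the sum with both the
		 * protocol number (6) and the TCP segment length; the
		 * hardware (or in_cksum()) later folds in the payload.
		 */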
#ifdef NETFLIX_TCP_O_UDP
		}
#endif
		/* IP version must be set here for ipv4/ipv6 checking later */
		KASSERT(ip->ip_v == IPVERSION,
		    ("%s: IP version incorrect: %d", __func__, ip->ip_v));
	}
#endif				/* INET */
	/*
	 * Enable TSO and specify the size of the segments. The TCP pseudo
	 * header checksum is always provided. XXX: Fixme: This is currently
	 * not the case for IPv6.
	 */
	if (tso) {
		KASSERT(len > tp->t_maxseg - optlen,
		    ("%s: len <= tso_segsz", __func__));
		m->m_pkthdr.csum_flags |= CSUM_TSO;
		m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
	}
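	/*
	 * Example (values assumed): with t_maxseg = 1460 and 12 bytes of
	 * timestamp options, tso_segsz = 1460 - 12 = 1448, so a 64 KB TSO
	 * burst is cut by the NIC into about ceil(65536 / 1448) = 46
	 * segments, each carrying the same TCP options as the template
	 * header built here.
	 */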
	KASSERT(len + hdrlen == m_length(m, NULL),
	    ("%s: mbuf chain different than expected: %d + %u != %u",
	    __func__, len, hdrlen, m_length(m, NULL)));

	/* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
	hhook_run_tcp_est_out(tp, th, &to, len, tso);
#ifdef TCPDEBUG
	/*
	 * Trace.
	 */
	if (so->so_options & SO_DEBUG) {
		u_short save;

#ifdef INET6
		if (!isipv6)
#endif
		{
			save = ipov->ih_len;
			ipov->ih_len = htons(m->m_pkthdr.len /* - hdrlen +
			    * (th->th_off << 2) */ );
		}
		tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
#ifdef INET6
		if (!isipv6)
#endif
			ipov->ih_len = save;
	}
#endif				/* TCPDEBUG */
	/* We're getting ready to send; log now. */
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
		if (rsm || sack_rxmit) {
			log.u_bbr.flex8 = 1;
		} else {
			log.u_bbr.flex8 = 0;
		}
		lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
		    len, &log, false, NULL, NULL, 0, NULL);
	} else
		lgb = NULL;
	/*
	 * Fill in IP length and desired time to live and send to IP level.
	 * There should be a better way to handle ttl and tos; we could keep
	 * them in the template, but need a way to checksum without them.
	 */
	/*
	 * m->m_pkthdr.len should have been set before checksum calculation,
	 * because in6_cksum() needs it.
	 */
#ifdef INET6
	if (isipv6) {
		/*
		 * we separately set hoplimit for every segment, since the
		 * user might want to change the value via setsockopt. Also,
		 * desired default hop limit might be changed via Neighbor
		 * Discovery.
		 */
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);

		/*
		 * Set the packet size here for the benefit of DTrace
		 * probes. ip6_output() will set it properly; it's supposed
		 * to include the option header lengths as well.
		 */
		ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));

		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
		else
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;

		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);
		else
			TCP_PROBE5(send, NULL, tp, ip6, tp, th);

		/* TODO: IPv6 IP6TOS_ECT bit on */
		error = ip6_output(m, tp->t_inpcb->in6p_outputopts,
		    &inp->inp_route6,
		    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, NULL, inp);

		if (error == EMSGSIZE && inp->inp_route6.ro_rt != NULL)
			mtu = inp->inp_route6.ro_rt->rt_mtu;
	}
#endif				/* INET6 */
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		ip->ip_len = htons(m->m_pkthdr.len);
#ifdef INET6
		if (inp->inp_vflag & INP_IPV6PROTO)
			ip->ip_ttl = in6_selecthlim(inp, NULL);
#endif				/* INET6 */
		/*
		 * If we do path MTU discovery, then we set DF on every
		 * packet. This might not be the best thing to do according
		 * to RFC3390 Section 2. However the tcp hostcache mitigates
		 * the problem so it affects only the first tcp connection
		 * with a host.
		 *
		 * NB: Don't set DF on small MTU/MSS to have a safe
		 * fallback.
		 */
		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
			if (tp->t_port == 0 || len < V_tcp_minmss) {
				ip->ip_off |= htons(IP_DF);
			}
		} else {
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
		}

		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);
		else
			TCP_PROBE5(send, NULL, tp, ip, tp, th);

		error = ip_output(m, tp->t_inpcb->inp_options, &inp->inp_route,
		    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0,
		    inp);
		if (error == EMSGSIZE && inp->inp_route.ro_rt != NULL)
			mtu = inp->inp_route.ro_rt->rt_mtu;
	}
#endif				/* INET */
out:
	if (lgb) {
		lgb->tlb_errno = error;
		lgb = NULL;
	}
	/*
	 * In transmit state, time the transmission and arrange for the
	 * retransmit. In persist state, just set snd_max.
	 */
	if (error == 0) {
		if (TCPS_HAVEESTABLISHED(tp->t_state) &&
		    (tp->t_flags & TF_SACK_PERMIT) &&
		    tp->rcv_numsacks > 0)
			tcp_clean_dsack_blocks(tp);
		if (len == 0)
			counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1);
		else if (len == 1) {
			counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1);
		} else if (len > 1) {
			int idx;

			idx = (len / tp->t_maxseg) + 3;
			if (idx >= TCP_MSS_ACCT_ATIMER)
				counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
			else
				counter_u64_add(rack_out_size[idx], 1);
		}
	}
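	/*
	 * Example of the histogram bucketing above (illustrative): with
	 * t_maxseg = 1460, a 4380 byte send gives idx = 4380 / 1460 + 3 = 6,
	 * i.e. sends are binned by how many full segments they carry, with
	 * everything at or beyond TCP_MSS_ACCT_ATIMER collapsed into the
	 * last bucket.
	 */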
	if (sub_from_prr && (error == 0)) {
		if (rack->r_ctl.rc_prr_sndcnt >= len)
			rack->r_ctl.rc_prr_sndcnt -= len;
		else
			rack->r_ctl.rc_prr_sndcnt = 0;
	}
	rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, cts,
	    pass, rsm);
	if ((tp->t_flags & TF_FORCEDATA) == 0 ||
	    (rack->rc_in_persist == 0)) {
#ifdef NETFLIX_STATS
		tcp_seq startseq = tp->snd_nxt;
#endif
		/*
		 * Advance snd_nxt over sequence space of this segment.
		 */
		if (error)
			/* We don't log or do anything with errors */
			goto nomore;
		if (flags & (TH_SYN | TH_FIN)) {
			if (flags & TH_SYN)
				tp->snd_nxt++;
			if (flags & TH_FIN) {
				tp->snd_nxt++;
				tp->t_flags |= TF_SENTFIN;
			}
		}
		tp->snd_nxt += len;
		/* In the ENOBUFS case we do *not* update snd_max */
		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
			if (tp->snd_una == tp->snd_max) {
				/*
				 * Update the time we just added data since
				 * none was outstanding.
				 */
				rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
				tp->t_acktime = ticks;
			}
			tp->snd_max = tp->snd_nxt;
#ifdef NETFLIX_STATS
			if (!(tp->t_flags & TF_GPUTINPROG) && len) {
				tp->t_flags |= TF_GPUTINPROG;
				tp->gput_seq = startseq;
				tp->gput_ack = startseq +
				    ulmin(sbavail(sb) - sb_offset, sendwin);
				tp->gput_ts = tcp_ts_getticks();
			}
#endif
		}
		/*
		 * Set retransmit timer if not currently set, and not doing
		 * a pure ack or a keep-alive probe. Initial value for
		 * retransmit timer is smoothed round-trip time + 2 *
		 * round-trip time variance. Initialize shift counter which
		 * is used for backoff of retransmit time.
		 */
		if ((tp->snd_wnd == 0) &&
		    TCPS_HAVEESTABLISHED(tp->t_state)) {
			/*
			 * If the persist timer was set above (right before
			 * the goto send) and still needs to be on, let's
			 * make sure all is canceled. If the persist timer
			 * is not running, we want to get it up.
			 */
			if (rack->rc_in_persist == 0) {
				rack_enter_persist(tp, rack, cts);
			}
		}
	} else {
		/*
		 * Persist case: update snd_max, but since we are in persist
		 * mode (no window) we do not update snd_nxt.
		 */
		int32_t xlen = len;

		if (error)
			goto nomore;

		if (flags & TH_SYN)
			++xlen;
		if (flags & TH_FIN) {
			++xlen;
			tp->t_flags |= TF_SENTFIN;
		}
		/* In the ENOBUFS case we do *not* update snd_max */
		if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max)) {
			if (tp->snd_una == tp->snd_max) {
				/*
				 * Update the time we just added data since
				 * none was outstanding.
				 */
				rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
				tp->t_acktime = ticks;
			}
			tp->snd_max = tp->snd_nxt + len;
		}
	}
nomore:
	if (error) {
		SOCKBUF_UNLOCK_ASSERT(sb);	/* Check gotos. */
		/*
		 * Failures do not advance the seq counter above. For the
		 * case of ENOBUFS we will fall out and retry in 1ms with
		 * the hpts. Everything else will just have to retransmit
		 * with the timer.
		 *
		 * In any case, we do not want to loop around for another
		 * send without a good reason.
		 */
		sendalot = 0;
		switch (error) {
		case EPERM:
			tp->t_flags &= ~TF_FORCEDATA;
			tp->t_softerror = error;
			return (error);
		case ENOBUFS:
			if (slot == 0) {
				/*
				 * Pace us right away to retry in a short
				 * while.
				 */
				slot = 1 + rack->rc_enobuf;
				if (rack->rc_enobuf < 255)
					rack->rc_enobuf++;
				if (slot > (rack->rc_rack_rtt / 2)) {
					slot = rack->rc_rack_rtt / 2;
				}
				if (slot < 10)
					slot = 10;
			}
			counter_u64_add(rack_saw_enobuf, 1);
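			/*
			 * Example of the backoff above (illustrative):
			 * after three consecutive ENOBUFS returns,
			 * rc_enobuf = 3, so the retry slot starts at
			 * 1 + 3 = 4 time units and is capped at half the
			 * measured rack RTT; a 40 ms RTT therefore bounds
			 * the wait to 20 ms.
			 */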
			goto enobufs;
		case EMSGSIZE:
			/*
			 * For some reason the interface we used initially
			 * to send segments changed to another or lowered
			 * its MTU. If TSO was active we either got an
			 * interface without TSO capabilities or TSO was
			 * turned off. If we obtained mtu from ip_output()
			 * then update it and try again.
			 */
			if (tso)
				tp->t_flags &= ~TF_TSO;
			if (mtu != 0) {
				tcp_mss_update(tp, -1, mtu, NULL, NULL);
				goto again;
			}
			slot = 10;
			rack_start_hpts_timer(rack, tp, cts, __LINE__, slot, 0, 1);
			tp->t_flags &= ~TF_FORCEDATA;
			return (error);
		case ENETUNREACH:
			counter_u64_add(rack_saw_enetunreach, 1);
		case EHOSTDOWN:
		case EHOSTUNREACH:
		case ENETDOWN:
			if (TCPS_HAVERCVDSYN(tp->t_state)) {
				tp->t_softerror = error;
			}
			/* FALLTHROUGH */
		default:
			slot = 10;
			rack_start_hpts_timer(rack, tp, cts, __LINE__, slot, 0, 1);
			tp->t_flags &= ~TF_FORCEDATA;
			return (error);
		}
	} else {
		rack->rc_enobuf = 0;
	}
	TCPSTAT_INC(tcps_sndtotal);
	/*
	 * Data sent (as far as we can tell). If this advertises a larger
	 * window than any other segment, then remember the size of the
	 * advertised window. Any pending ACK has now been sent.
	 */
	if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
		tp->rcv_adv = tp->rcv_nxt + recwin;
	tp->last_ack_sent = tp->rcv_nxt;
	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
enobufs:
	rack->r_tlp_running = 0;
	if ((flags & TH_RST) || (would_have_fin == 1)) {
		/*
		 * We don't send again after a RST. Nor do we send again if
		 * we would have had a FIN but no longer do.
		 */
		slot = 0;
		sendalot = 0;
	}
	if (slot) {
		/* set the rack tcb into the slot N */
		counter_u64_add(rack_paced_segments, 1);
	} else if (sendalot) {
		if (len)
			counter_u64_add(rack_unpaced_segments, 1);
		sack_rxmit = 0;
		tp->t_flags &= ~TF_FORCEDATA;
		goto again;
	} else if (len) {
		counter_u64_add(rack_unpaced_segments, 1);
	}
	tp->t_flags &= ~TF_FORCEDATA;
	rack_start_hpts_timer(rack, tp, cts, __LINE__, slot, tot_len_this_send, 1);
	return (error);
}
/*
 * rack_ctloutput() must drop the inpcb lock before performing copyin on
 * socket option arguments. When it re-acquires the lock after the copy, it
 * has to revalidate that the connection is still valid for the socket
 * option.
 */
static int
rack_set_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
{
	int32_t error = 0, optval;

	switch (sopt->sopt_name) {
	case TCP_RACK_PROP_RATE:
	case TCP_RACK_PROP:
	case TCP_RACK_TLP_REDUCE:
	case TCP_RACK_EARLY_RECOV:
	case TCP_RACK_PACE_ALWAYS:
	case TCP_DELACK:
	case TCP_RACK_PACE_REDUCE:
	case TCP_RACK_PACE_MAX_SEG:
	case TCP_RACK_PRR_SENDALOT:
	case TCP_RACK_MIN_TO:
	case TCP_RACK_EARLY_SEG:
	case TCP_RACK_REORD_THRESH:
	case TCP_RACK_REORD_FADE:
	case TCP_RACK_TLP_THRESH:
	case TCP_RACK_PKT_DELAY:
	case TCP_RACK_TLP_USE:
	case TCP_RACK_TLP_INC_VAR:
	case TCP_RACK_IDLE_REDUCE_HIGH:
	case TCP_RACK_MIN_PACE:
	case TCP_RACK_MIN_PACE_SEG:
	case TCP_BBR_RACK_RTT_USE:
	case TCP_DATA_AFTER_CLOSE:
		break;
	default:
		return (tcp_default_ctloutput(so, sopt, inp, tp));
	}
	INP_WUNLOCK(inp);
	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
	if (error)
		return (error);
	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	switch (sopt->sopt_name) {
	case TCP_RACK_PROP_RATE:
		if ((optval <= 0) || (optval >= 100)) {
			error = EINVAL;
			break;
		}
		RACK_OPTS_INC(tcp_rack_prop_rate);
		rack->r_ctl.rc_prop_rate = optval;
		break;
	case TCP_RACK_TLP_USE:
		if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) {
			error = EINVAL;
			break;
		}
		RACK_OPTS_INC(tcp_tlp_use);
		rack->rack_tlp_threshold_use = optval;
		break;
	case TCP_RACK_PROP:
		/* RACK proportional rate reduction (bool) */
		RACK_OPTS_INC(tcp_rack_prop);
		rack->r_ctl.rc_prop_reduce = optval;
		break;
	case TCP_RACK_TLP_REDUCE:
		/* RACK TLP cwnd reduction (bool) */
		RACK_OPTS_INC(tcp_rack_tlp_reduce);
		rack->r_ctl.rc_tlp_cwnd_reduce = optval;
		break;
	case TCP_RACK_EARLY_RECOV:
		/* Should recovery happen early (bool) */
		RACK_OPTS_INC(tcp_rack_early_recov);
		rack->r_ctl.rc_early_recovery = optval;
		break;
	case TCP_RACK_PACE_ALWAYS:
		/* Use the always pace method (bool) */
		RACK_OPTS_INC(tcp_rack_pace_always);
		if (optval > 0)
			rack->rc_always_pace = 1;
		else
			rack->rc_always_pace = 0;
		break;
	case TCP_RACK_PACE_REDUCE:
		/* RACK Hptsi reduction factor (divisor) */
		RACK_OPTS_INC(tcp_rack_pace_reduce);
		if (optval)
			/* Must be non-zero */
			rack->rc_pace_reduce = optval;
		else
			error = EINVAL;
		break;
	case TCP_RACK_PACE_MAX_SEG:
		/* Max segments in a pace */
		RACK_OPTS_INC(tcp_rack_max_seg);
		rack->rc_pace_max_segs = optval;
		break;
	case TCP_RACK_PRR_SENDALOT:
		/* Allow PRR to send more than one seg */
		RACK_OPTS_INC(tcp_rack_prr_sendalot);
		rack->r_ctl.rc_prr_sendalot = optval;
		break;
	case TCP_RACK_MIN_TO:
		/* Minimum time between rack t-o's in ms */
		RACK_OPTS_INC(tcp_rack_min_to);
		rack->r_ctl.rc_min_to = optval;
		break;
	case TCP_RACK_EARLY_SEG:
		/* If early recovery max segments */
		RACK_OPTS_INC(tcp_rack_early_seg);
		rack->r_ctl.rc_early_recovery_segs = optval;
		break;
	case TCP_RACK_REORD_THRESH:
		/* RACK reorder threshold (shift amount) */
		RACK_OPTS_INC(tcp_rack_reord_thresh);
		if ((optval > 0) && (optval < 31))
			rack->r_ctl.rc_reorder_shift = optval;
		else
			error = EINVAL;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		RACK_OPTS_INC(tcp_rack_reord_fade);
		rack->r_ctl.rc_reorder_fade = optval;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		RACK_OPTS_INC(tcp_rack_tlp_thresh);
		if (optval)
			rack->r_ctl.rc_tlp_threshold = optval;
		else
			error = EINVAL;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		RACK_OPTS_INC(tcp_rack_pkt_delay);
		rack->r_ctl.rc_pkt_delay = optval;
		break;
	case TCP_RACK_TLP_INC_VAR:
		/* Does TLP include rtt variance in t-o */
		RACK_OPTS_INC(tcp_rack_tlp_inc_var);
		rack->r_ctl.rc_prr_inc_var = optval;
		break;
	case TCP_RACK_IDLE_REDUCE_HIGH:
		RACK_OPTS_INC(tcp_rack_idle_reduce_high);
		if (optval)
			rack->r_idle_reduce_largest = 1;
		else
			rack->r_idle_reduce_largest = 0;
		break;
	case TCP_DELACK:
		if (optval == 0)
			tp->t_delayed_ack = 0;
		else
			tp->t_delayed_ack = 1;
		if (tp->t_flags & TF_DELACK) {
			tp->t_flags &= ~TF_DELACK;
			tp->t_flags |= TF_ACKNOW;
		}
		break;
	case TCP_RACK_MIN_PACE:
		RACK_OPTS_INC(tcp_rack_min_pace);
		if (optval > 3)
			rack->r_enforce_min_pace = 3;
		else
			rack->r_enforce_min_pace = optval;
		break;
	case TCP_RACK_MIN_PACE_SEG:
		RACK_OPTS_INC(tcp_rack_min_pace_seg);
		if (optval >= 16)
			rack->r_min_pace_seg_thresh = 15;
		else
			rack->r_min_pace_seg_thresh = optval;
		break;
	case TCP_BBR_RACK_RTT_USE:
		if ((optval != USE_RTT_HIGH) &&
		    (optval != USE_RTT_LOW) &&
		    (optval != USE_RTT_AVG))
			error = EINVAL;
		else
			rack->r_ctl.rc_rate_sample_method = optval;
		break;
	case TCP_DATA_AFTER_CLOSE:
		if (optval)
			rack->rc_allow_data_af_clo = 1;
		else
			rack->rc_allow_data_af_clo = 0;
		break;
	default:
		return (tcp_default_ctloutput(so, sopt, inp, tp));
	}
/*	tcp_log_socket_option(tp, sopt->sopt_name, optval, error);*/
	INP_WUNLOCK(inp);
	return (error);
}
static int
rack_get_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
{
	int32_t error, optval;

	/*
	 * Because all our options are either boolean or an int, we can just
	 * pull everything into optval and then unlock and copy. If we ever
	 * add an option that is not an int, then this will have quite an
	 * impact on this routine.
	 */
	error = 0;
	switch (sopt->sopt_name) {
	case TCP_RACK_PROP_RATE:
		optval = rack->r_ctl.rc_prop_rate;
		break;
	case TCP_RACK_PROP:
		/* RACK proportional rate reduction (bool) */
		optval = rack->r_ctl.rc_prop_reduce;
		break;
	case TCP_RACK_TLP_REDUCE:
		/* RACK TLP cwnd reduction (bool) */
		optval = rack->r_ctl.rc_tlp_cwnd_reduce;
		break;
	case TCP_RACK_EARLY_RECOV:
		/* Should recovery happen early (bool) */
		optval = rack->r_ctl.rc_early_recovery;
		break;
	case TCP_RACK_PACE_REDUCE:
		/* RACK Hptsi reduction factor (divisor) */
		optval = rack->rc_pace_reduce;
		break;
	case TCP_RACK_PACE_MAX_SEG:
		/* Max segments in a pace */
		optval = rack->rc_pace_max_segs;
		break;
	case TCP_RACK_PACE_ALWAYS:
		/* Use the always pace method */
		optval = rack->rc_always_pace;
		break;
	case TCP_RACK_PRR_SENDALOT:
		/* Allow PRR to send more than one seg */
		optval = rack->r_ctl.rc_prr_sendalot;
		break;
	case TCP_RACK_MIN_TO:
		/* Minimum time between rack t-o's in ms */
		optval = rack->r_ctl.rc_min_to;
		break;
	case TCP_RACK_EARLY_SEG:
		/* If early recovery max segments */
		optval = rack->r_ctl.rc_early_recovery_segs;
		break;
	case TCP_RACK_REORD_THRESH:
		/* RACK reorder threshold (shift amount) */
		optval = rack->r_ctl.rc_reorder_shift;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		optval = rack->r_ctl.rc_reorder_fade;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		break;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
		break;
	case TCP_RACK_TLP_INC_VAR:
		/* Does TLP include rtt variance in t-o */
		optval = rack->r_ctl.rc_prr_inc_var;
		break;
	case TCP_RACK_IDLE_REDUCE_HIGH:
		optval = rack->r_idle_reduce_largest;
		break;
	case TCP_RACK_MIN_PACE:
		optval = rack->r_enforce_min_pace;
		break;
	case TCP_RACK_MIN_PACE_SEG:
		optval = rack->r_min_pace_seg_thresh;
		break;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		break;
	case TCP_DELACK:
		optval = tp->t_delayed_ack;
		break;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		break;
	default:
		return (tcp_default_ctloutput(so, sopt, inp, tp));
	}
	INP_WUNLOCK(inp);
	error = sooptcopyout(sopt, &optval, sizeof optval);
	return (error);
}
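/*
 * Illustrative userland use of the RACK socket options handled above
 * (a sketch; it assumes a connected TCP socket fd that has already been
 * switched onto the RACK stack and that the TCP_RACK_* constants are
 * visible to the application):
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <err.h>
 *
 *	int optval = 1;
 *	// enable pacing on every send for this connection
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS,
 *	    &optval, sizeof(optval)) == -1)
 *		err(1, "setsockopt");
 *
 *	socklen_t optlen = sizeof(optval);
 *	// read back the current setting
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS,
 *	    &optval, &optlen) == -1)
 *		err(1, "getsockopt");
 */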
static int
rack_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp)
{
	int32_t error = EINVAL;
	struct tcp_rack *rack;

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
	if (sopt->sopt_dir == SOPT_SET) {
		return (rack_set_sockopt(so, sopt, inp, tp, rack));
	} else if (sopt->sopt_dir == SOPT_GET) {
		return (rack_get_sockopt(so, sopt, inp, tp, rack));
	}
	INP_WUNLOCK(inp);
	return (error);
}
struct tcp_function_block __tcp_rack = {
	.tfb_tcp_block_name = __XSTRING(STACKNAME),
	.tfb_tcp_output = rack_output,
	.tfb_tcp_do_segment = rack_do_segment,
	.tfb_tcp_ctloutput = rack_ctloutput,
	.tfb_tcp_fb_init = rack_init,
	.tfb_tcp_fb_fini = rack_fini,
	.tfb_tcp_timer_stop_all = rack_stopall,
	.tfb_tcp_timer_activate = rack_timer_activate,
	.tfb_tcp_timer_active = rack_timer_active,
	.tfb_tcp_timer_stop = rack_timer_stop,
	.tfb_tcp_rexmit_tmr = rack_remxt_tmr,
	.tfb_tcp_handoff_ok = rack_handoff_ok
};

static const char *rack_stack_names[] = {
	__XSTRING(STACKNAME),
#ifdef STACKALIAS
	__XSTRING(STACKALIAS),
#endif
};
static int
rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
{
	memset(mem, 0, size);
	return (0);
}

static void
rack_dtor(void *mem, int32_t size, void *arg)
{

}

static bool rack_mod_inited = false;

static int
tcp_addrack(module_t mod, int32_t type, void *data)
{
	int32_t err = 0;
	int num_stacks;

	switch (type) {
	case MOD_LOAD:
		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
		    sizeof(struct rack_sendmap),
		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
		    sizeof(struct tcp_rack),
		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
		sysctl_ctx_init(&rack_sysctl_ctx);
		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
		    OID_AUTO,
		    __XSTRING(STACKNAME),
		    CTLFLAG_RW, 0,
		    "");
		if (rack_sysctl_root == NULL) {
			printf("Failed to add sysctl node\n");
			err = EFAULT;
			goto free_uma;
		}
		rack_init_sysctls();
		num_stacks = nitems(rack_stack_names);
		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
		    rack_stack_names, &num_stacks);
		if (err) {
			printf("Failed to register %s stack name for "
			    "%s module\n", rack_stack_names[num_stacks],
			    __XSTRING(MODNAME));
			sysctl_ctx_free(&rack_sysctl_ctx);
free_uma:
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			rack_counter_destroy();
			printf("Failed to register rack module -- err:%d\n", err);
			return (err);
		}
		rack_mod_inited = true;
		break;
	case MOD_QUIESCE:
		err = deregister_tcp_functions(&__tcp_rack, true, false);
		break;
	case MOD_UNLOAD:
		err = deregister_tcp_functions(&__tcp_rack, false, true);
		if (err == EBUSY)
			break;
		if (rack_mod_inited) {
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			sysctl_ctx_free(&rack_sysctl_ctx);
			rack_counter_destroy();
			rack_mod_inited = false;
		}
		err = 0;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (err);
}
static moduledata_t tcp_rack = {
	.name = __XSTRING(MODNAME),
	.evhand = tcp_addrack,
	.priv = 0
};

MODULE_VERSION(MODNAME, 1);
DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
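
/*
 * Illustrative way to select this stack once the module is loaded
 * (a sketch; assumes the kernel was built with TCPHPTS support and that
 * these sysctls exist on the running system):
 *
 *	# kldload tcp_rack
 *	# sysctl net.inet.tcp.functions_available
 *	# sysctl net.inet.tcp.functions_default=rack
 *
 * Individual sockets can also opt in with the TCP_FUNCTION_BLK socket
 * option before connecting, passing a struct tcp_function_set whose
 * function_set_name is "rack".
 */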