/*-
 * Copyright (c) 2016-2018
 *	Netflix Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include <sys/param.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef NETFLIX_STATS
#include <sys/stats.h>
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#ifdef NETFLIX_CWV
#include <netinet/tcp_newcwv.h>
#endif
#include <netinet/tcp_fastopen.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif				/* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

#include "sack_filter.h"
#include "tcp_rack.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#define	TICKS2SBT(__t)	(tick_sbt * ((sbintime_t)(__t)))

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving which slowly drops
 *    the congestion window so that the ack clock can
 *    be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named) that
 *    will stop us using the number of dup acks and instead
 *    use time as the gauge of when we retransmit.
 * - Reorder Detection of RFC 4737 and the Tail-Loss probe draft
 *    of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD today.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports SACK
 * initially and then assure that the RACK state matches the connection
 * state before calling the state's do_segment function. Each
 * state is simplified due to the fact that the original do_segment
 * has been decomposed and we *know* what state we are in (no
 * switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also overwritten with a new version since it
 * must maintain the new rack scoreboard.
 */
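
/*
 * For illustration (a sketch of the decomposition described above, not
 * part of the original notes): each TCP state gets its own handler --
 * e.g. rack_do_syn_sent() for SYN_SENT and rack_do_established() for
 * ESTABLISHED, see the prototypes below -- and rack_set_state() is the
 * helper that keeps that dispatch in sync with tp->t_state.
 */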

static int32_t rack_precache = 1;
static int32_t rack_tlp_thresh = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000;	/* 0 - never fade, def 60,000
						 * ms (60 seconds) */
static int32_t rack_pkt_delay = 1;
static int32_t rack_inc_var = 0;	/* For TLP */
static int32_t rack_reduce_largest_on_idle = 0;
static int32_t rack_min_pace_time = 0;
static int32_t rack_min_pace_time_seg_req = 6;
static int32_t rack_early_recovery = 1;
static int32_t rack_early_recovery_max_seg = 6;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1;	/* Number of ms minimum timeout */
static int32_t rack_tlp_in_recovery = 1;	/* Can we do TLP in recovery? */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
/*
 * Currently regular tcp has a rto_min of 30ms;
 * the backoff goes 12 times, so that ends up
 * being a total of 122.850 seconds before a
 * connection is killed.
 */
static int32_t rack_tlp_min = 10;
static int32_t rack_rto_min = 30;	/* 30ms same as main freebsd */
static int32_t rack_rto_max = 30000;	/* 30 seconds */
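/*
 * Worked out, the note above: with a 30ms floor and the RTO doubling on
 * each of the 12 backoffs, the total wait is
 * 30ms * (2^0 + 2^1 + ... + 2^11) = 30ms * 4095 = 122.850 seconds.
 */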
static const int32_t rack_free_cache = 2;
static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 1;
static int32_t rack_delayed_ack_time = 200;	/* 200ms */
static int32_t rack_slot_reduction = 4;
static int32_t rack_lower_cwnd_at_tlp = 0;
static int32_t rack_use_proportional_reduce = 0;
static int32_t rack_proportional_rate = 10;
static int32_t rack_tlp_max_resend = 2;
static int32_t rack_limited_retran = 0;
static int32_t rack_always_send_oldest = 0;
static int32_t rack_sack_block_limit = 128;
static int32_t rack_use_sack_filter = 1;
static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;
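
/*
 * All of the knobs above are exported read/write by rack_init_sysctls()
 * below, so they can be tuned at runtime under the stack's sysctl tree
 * (the exact OID prefix depends on where rack_sysctl_root is attached,
 * which is not shown in this excerpt).
 */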

/* Rack specific counters */
counter_u64_t rack_badfr;
counter_u64_t rack_badfr_bytes;
counter_u64_t rack_rtm_prr_retran;
counter_u64_t rack_rtm_prr_newdata;
counter_u64_t rack_timestamp_mismatch;
counter_u64_t rack_reorder_seen;
counter_u64_t rack_paced_segments;
counter_u64_t rack_unpaced_segments;
counter_u64_t rack_saw_enobuf;
counter_u64_t rack_saw_enetunreach;

/* Tail loss probe counters */
counter_u64_t rack_tlp_tot;
counter_u64_t rack_tlp_newdata;
counter_u64_t rack_tlp_retran;
counter_u64_t rack_tlp_retran_bytes;
counter_u64_t rack_tlp_retran_fail;
counter_u64_t rack_to_tot;
counter_u64_t rack_to_arm_rack;
counter_u64_t rack_to_arm_tlp;
counter_u64_t rack_to_alloc;
counter_u64_t rack_to_alloc_hard;
counter_u64_t rack_to_alloc_emerg;

counter_u64_t rack_sack_proc_all;
counter_u64_t rack_sack_proc_short;
counter_u64_t rack_sack_proc_restart;
counter_u64_t rack_runt_sacks;
counter_u64_t rack_used_tlpmethod;
counter_u64_t rack_used_tlpmethod2;
counter_u64_t rack_enter_tlp_calc;
counter_u64_t rack_input_idle_reduces;
counter_u64_t rack_tlp_does_nada;

/* Temp CPU counters */
counter_u64_t rack_find_high;

counter_u64_t rack_progress_drops;
counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];
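
/*
 * The counters above are registered read-only by rack_init_sysctls()
 * and can be zeroed in one shot through the "clear" node serviced by
 * sysctl_rack_clear() below.
 */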

static void
rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);
static int32_t
rack_process_ack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t * ti_locked,
    uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val);
static int32_t
rack_process_data(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static void
rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
    struct tcphdr *th, uint16_t nsegs, uint16_t type, int32_t recovery);
static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
static struct rack_sendmap *
rack_check_recovery_mode(struct tcpcb *tp,
    uint32_t tsused);
static void
rack_cong_signal(struct tcpcb *tp, struct tcphdr *th,
    uint32_t type);
static void rack_counter_destroy(void);
static int
rack_ctloutput(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp);
static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
static void
rack_do_segment(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint8_t iptos, int32_t ti_locked);
static void rack_dtor(void *mem, int32_t size, void *arg);
static void
rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm,
    uint32_t t, uint32_t cts);
static struct rack_sendmap *
rack_find_high_nonack(struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
static int
rack_get_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
static int32_t rack_handoff_ok(struct tcpcb *tp);
static int32_t rack_init(struct tcpcb *tp);
static void rack_init_sysctls(void);
static void
rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th);
static void
rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
    uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts,
    uint8_t pass, struct rack_sendmap *hintrsm);
static void
rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num);
static int32_t rack_output(struct tcpcb *tp);
static void
rack_hpts_do_segment(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint8_t iptos, int32_t ti_locked, int32_t nxt_pkt, struct timeval *tv);
static uint32_t
rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
    struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
    uint32_t cts);
static void rack_post_recovery(struct tcpcb *tp, struct tcphdr *th);
static void rack_remxt_tmr(struct tcpcb *tp);
static int
rack_set_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
static int32_t rack_stopall(struct tcpcb *tp);
static void
rack_timer_activate(struct tcpcb *tp, uint32_t timer_type,
    uint32_t slot);
static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type);
static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type);
static uint32_t
rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint32_t ts, int32_t * lenp);
static void
rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint32_t ts);
static int32_t
rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type);
static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
static void
rack_challenge_ack(struct mbuf *m, struct tcphdr *th,
    struct tcpcb *tp, int32_t * ti_locked, int32_t * ret_val);
static int32_t
rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int32_t
rack_do_closing(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static void rack_do_drop(struct mbuf *m, struct tcpcb *tp, int32_t * ti_locked);
static void
rack_do_dropafterack(struct mbuf *m, struct tcpcb *tp,
    struct tcphdr *th, int32_t * ti_locked, int32_t thflags, int32_t tlen, int32_t * ret_val);
static void
rack_do_dropwithreset(struct mbuf *m, struct tcpcb *tp,
    struct tcphdr *th, int32_t * ti_locked, int32_t rstreason, int32_t tlen);
static int32_t
rack_do_established(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int32_t
rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t nxt_pkt);
static int32_t
rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int32_t
rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int32_t
rack_do_lastack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int32_t
rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int32_t
rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int32_t
rack_drop_checks(struct tcpopt *to, struct mbuf *m,
    struct tcphdr *th, struct tcpcb *tp, int32_t * tlenp, int32_t * ti_locked, int32_t * thf,
    int32_t * drop_hdrlen, int32_t * ret_val);
static int32_t
rack_process_rst(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t * ti_locked);
struct rack_sendmap *
tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t tsused);
static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt);
static void
tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th);
static int32_t
rack_ts_check(struct mbuf *m, struct tcphdr *th,
    struct tcpcb *tp, int32_t * ti_locked, int32_t tlen, int32_t thflags, int32_t * ret_val);

int32_t rack_clear_counter = 0;

static int
sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
{
	uint32_t stat;
	int32_t error;

	error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
	if (error || req->newptr == NULL)
		return (error);

	error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
	if (error)
		return (error);
	if (stat == 1) {
		printf("Clearing RACK counters\n");
		counter_u64_zero(rack_badfr);
		counter_u64_zero(rack_badfr_bytes);
		counter_u64_zero(rack_rtm_prr_retran);
		counter_u64_zero(rack_rtm_prr_newdata);
		counter_u64_zero(rack_timestamp_mismatch);
		counter_u64_zero(rack_reorder_seen);
		counter_u64_zero(rack_tlp_tot);
		counter_u64_zero(rack_tlp_newdata);
		counter_u64_zero(rack_tlp_retran);
		counter_u64_zero(rack_tlp_retran_bytes);
		counter_u64_zero(rack_tlp_retran_fail);
		counter_u64_zero(rack_to_tot);
		counter_u64_zero(rack_to_arm_rack);
		counter_u64_zero(rack_to_arm_tlp);
		counter_u64_zero(rack_paced_segments);
		counter_u64_zero(rack_unpaced_segments);
		counter_u64_zero(rack_saw_enobuf);
		counter_u64_zero(rack_saw_enetunreach);
		counter_u64_zero(rack_to_alloc_hard);
		counter_u64_zero(rack_to_alloc_emerg);
		counter_u64_zero(rack_sack_proc_all);
		counter_u64_zero(rack_sack_proc_short);
		counter_u64_zero(rack_sack_proc_restart);
		counter_u64_zero(rack_to_alloc);
		counter_u64_zero(rack_find_high);
		counter_u64_zero(rack_runt_sacks);
		counter_u64_zero(rack_used_tlpmethod);
		counter_u64_zero(rack_used_tlpmethod2);
		counter_u64_zero(rack_enter_tlp_calc);
		counter_u64_zero(rack_progress_drops);
		counter_u64_zero(rack_tlp_does_nada);
		rack_clear_counter = 0;
	}
	return (0);
}

static void
rack_init_sysctls(void)
{
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "rate_sample_method", CTLFLAG_RW,
	    &rack_rate_sample_method, USE_RTT_LOW,
	    "What method should we use for rate sampling 0=high, 1=low");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "data_after_close", CTLFLAG_RW,
	    &rack_ignore_data_after_close, 0,
	    "Do we hold off sending a RST until all pending data is ack'd");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "tlpmethod", CTLFLAG_RW,
	    &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
	    "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "min_pace_time", CTLFLAG_RW,
	    &rack_min_pace_time, 0,
	    "Should we enforce a minimum pace time of 1ms");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "min_pace_segs", CTLFLAG_RW,
	    &rack_min_pace_time_seg_req, 6,
	    "How many segments have to be in the length to enforce min-pace-time");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "idle_reduce_high", CTLFLAG_RW,
	    &rack_reduce_largest_on_idle, 0,
	    "Should we reduce the largest cwnd seen to IW on idle reduction");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "bb_verbose", CTLFLAG_RW,
	    &rack_verbose_logging, 0,
	    "Should RACK black box logging be verbose");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "sackfiltering", CTLFLAG_RW,
	    &rack_use_sack_filter, 1,
	    "Do we use sack filtering?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "delayed_ack", CTLFLAG_RW,
	    &rack_delayed_ack_time, 200,
	    "Delayed ack time (200ms)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "tlpminto", CTLFLAG_RW,
	    &rack_tlp_min, 10,
	    "TLP minimum timeout per the specification (10ms)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "precache", CTLFLAG_RW,
	    &rack_precache, 1,
	    "Where should we precache the mcopy (0 is not at all)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "sblklimit", CTLFLAG_RW,
	    &rack_sack_block_limit, 128,
	    "When do we start paying attention to small sack blocks");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "send_oldest", CTLFLAG_RW,
	    &rack_always_send_oldest, 1,
	    "Should we always send the oldest TLP and RACK-TLP");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "rack_tlp_in_recovery", CTLFLAG_RW,
	    &rack_tlp_in_recovery, 1,
	    "Can we do a TLP during recovery?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "rack_tlimit", CTLFLAG_RW,
	    &rack_limited_retran, 0,
	    "How many times can a rack timeout drive out sends");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "minrto", CTLFLAG_RW,
	    &rack_rto_min, 30,
	    "Minimum RTO in ms -- set with caution below 1000 due to TLP");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "maxrto", CTLFLAG_RW,
	    &rack_rto_max, 30000,
	    "Maximum RTO in ms -- should be at least as large as min_rto");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "tlp_retry", CTLFLAG_RW,
	    &rack_tlp_max_resend, 2,
	    "How many times does TLP retry a single segment or multiple with no ACK");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "recovery_loss_prop", CTLFLAG_RW,
	    &rack_use_proportional_reduce, 0,
	    "Should we proportionally reduce cwnd based on the number of losses");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "recovery_prop", CTLFLAG_RW,
	    &rack_proportional_rate, 10,
	    "What percent reduction per loss");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
	    &rack_lower_cwnd_at_tlp, 0,
	    "When a TLP completes a retran should we enter recovery?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "hptsi_reduces", CTLFLAG_RW,
	    &rack_slot_reduction, 4,
	    "When setting a slot should we reduce by divisor");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "hptsi_every_seg", CTLFLAG_RW,
	    &rack_pace_every_seg, 1,
	    "Should we pace out every segment with hptsi");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "hptsi_seg_max", CTLFLAG_RW,
	    &rack_hptsi_segments, 6,
	    "Should we pace out only a limited size of segments");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "prr_sendalot", CTLFLAG_RW,
	    &rack_send_a_lot_in_prr, 1,
	    "Send a lot in prr");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "minto", CTLFLAG_RW,
	    &rack_min_to, 1,
	    "Minimum rack timeout in milliseconds");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "earlyrecoveryseg", CTLFLAG_RW,
	    &rack_early_recovery_max_seg, 6,
	    "Max segments in early recovery");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "earlyrecovery", CTLFLAG_RW,
	    &rack_early_recovery, 1,
	    "Do we do early recovery with rack");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "reorder_thresh", CTLFLAG_RW,
	    &rack_reorder_thresh, 2,
	    "What factor for rack will be added when seeing reordering (shift right)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
	    &rack_tlp_thresh, 1,
	    "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "reorder_fade", CTLFLAG_RW,
	    &rack_reorder_fade, 0,
	    "Does reorder detection fade, if so how many ms (0 means never)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "pktdelay", CTLFLAG_RW,
	    &rack_pkt_delay, 1,
	    "Extra RACK time (in ms) besides reordering thresh");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "inc_var", CTLFLAG_RW,
	    &rack_inc_var, 0,
	    "Should rack add to the TLP timer the variance in rtt calculation");
	rack_badfr = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "badfr", CTLFLAG_RD,
	    &rack_badfr, "Total number of bad FRs");
	rack_badfr_bytes = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "badfr_bytes", CTLFLAG_RD,
	    &rack_badfr_bytes, "Total bytes of bad FRs");
	rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "prrsndret", CTLFLAG_RD,
	    &rack_rtm_prr_retran,
	    "Total number of prr based retransmits");
	rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "prrsndnew", CTLFLAG_RD,
	    &rack_rtm_prr_newdata,
	    "Total number of prr based new transmits");
	rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "tsnf", CTLFLAG_RD,
	    &rack_timestamp_mismatch,
	    "Total number of timestamps where we could not find the reported ts");
	rack_find_high = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "findhigh", CTLFLAG_RD,
	    &rack_find_high,
	    "Total number of FIN causing find-high");
	rack_reorder_seen = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "reordering", CTLFLAG_RD,
	    &rack_reorder_seen,
	    "Total number of times we added delay due to reordering");
	rack_tlp_tot = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "tlp_to_total", CTLFLAG_RD,
	    &rack_tlp_tot,
	    "Total number of tail loss probe expirations");
	rack_tlp_newdata = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "tlp_new", CTLFLAG_RD,
	    &rack_tlp_newdata,
	    "Total number of tail loss probe sending new data");

	rack_tlp_retran = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "tlp_retran", CTLFLAG_RD,
	    &rack_tlp_retran,
	    "Total number of tail loss probe sending retransmitted data");
	rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD,
	    &rack_tlp_retran_bytes,
	    "Total bytes of tail loss probe sending retransmitted data");
	rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "tlp_retran_fail", CTLFLAG_RD,
	    &rack_tlp_retran_fail,
	    "Total number of tail loss probe sending retransmitted data that failed (wait for t3)");
	rack_to_tot = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "rack_to_tot", CTLFLAG_RD,
	    &rack_to_tot,
	    "Total number of times the rack timeout expired");
	rack_to_arm_rack = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "arm_rack", CTLFLAG_RD,
	    &rack_to_arm_rack,
	    "Total number of times the rack timer was armed");
	rack_to_arm_tlp = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "arm_tlp", CTLFLAG_RD,
	    &rack_to_arm_tlp,
	    "Total number of times the tlp timer was armed");
	rack_paced_segments = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "paced", CTLFLAG_RD,
	    &rack_paced_segments,
	    "Total number of times a segment send caused hptsi");
	rack_unpaced_segments = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "unpaced", CTLFLAG_RD,
	    &rack_unpaced_segments,
	    "Total number of times a segment did not cause hptsi");
	rack_saw_enobuf = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "saw_enobufs", CTLFLAG_RD,
	    &rack_saw_enobuf,
	    "Total number of times a send returned ENOBUFS");
	rack_saw_enetunreach = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "saw_enetunreach", CTLFLAG_RD,
	    &rack_saw_enetunreach,
	    "Total number of times a send saw ENETUNREACH");
	rack_to_alloc = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "allocs", CTLFLAG_RD,
	    &rack_to_alloc,
	    "Total allocations of tracking structures");
	rack_to_alloc_hard = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "allochard", CTLFLAG_RD,
	    &rack_to_alloc_hard,
	    "Total allocations done with sleeping the hard way");
	rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "allocemerg", CTLFLAG_RD,
	    &rack_to_alloc_emerg,
	    "Total allocations done from the emergency cache");
	rack_sack_proc_all = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "sack_long", CTLFLAG_RD,
	    &rack_sack_proc_all,
	    "Total times we had to walk whole list for sack processing");

	rack_sack_proc_restart = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "sack_restart", CTLFLAG_RD,
	    &rack_sack_proc_restart,
	    "Total times we had to walk whole list due to a restart");
	rack_sack_proc_short = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "sack_short", CTLFLAG_RD,
	    &rack_sack_proc_short,
	    "Total times we took shortcut for sack processing");
	rack_enter_tlp_calc = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "tlp_calc_entered", CTLFLAG_RD,
	    &rack_enter_tlp_calc,
	    "Total times we called calc-tlp");
	rack_used_tlpmethod = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "hit_tlp_method", CTLFLAG_RD,
	    &rack_used_tlpmethod,
	    "Total number of times we used TLP method 1");
	rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "hit_tlp_method2", CTLFLAG_RD,
	    &rack_used_tlpmethod2,
	    "Total number of times we used TLP method 2");
	rack_runt_sacks = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "runtsacks", CTLFLAG_RD,
	    &rack_runt_sacks,
	    "Total number of runt sacks");
	rack_progress_drops = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "prog_drops", CTLFLAG_RD,
	    &rack_progress_drops,
	    "Total number of progress drops");
	rack_input_idle_reduces = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD,
	    &rack_input_idle_reduces,
	    "Total number of idle reductions on input");
	rack_tlp_does_nada = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "tlp_nada", CTLFLAG_RD,
	    &rack_tlp_does_nada,
	    "Total number of nada tlp calls");
	COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK);
	SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "outsize", CTLFLAG_RD,
	    rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes");
	COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK);
	SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "opts", CTLFLAG_RD,
	    rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats");
	SYSCTL_ADD_PROC(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters");
}
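
/*
 * Illustrative usage, assuming the stack's root is attached under the
 * usual net.inet.tcp hierarchy (the "<rack-root>" component is whatever
 * name rack_sysctl_root was created with, which is not shown in this
 * excerpt):
 *
 *   sysctl net.inet.tcp.<rack-root>.minrto      # read/write a tunable
 *   sysctl net.inet.tcp.<rack-root>.clear=1     # zero all the counters
 */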

static inline int32_t
rack_progress_timeout_check(struct tcpcb *tp)
{
	if (tp->t_maxunacktime && tp->t_acktime && TSTMP_GT(ticks, tp->t_acktime)) {
		if ((ticks - tp->t_acktime) >= tp->t_maxunacktime) {
			/*
			 * There is an assumption that the caller
			 * will drop the connection so we will
			 * increment the counters here.
			 */
			struct tcp_rack *rack;

			rack = (struct tcp_rack *)tp->t_fb_ptr;
			counter_u64_add(rack_progress_drops, 1);
			TCPSTAT_INC(tcps_progdrops);
			rack_log_progress_event(rack, tp, ticks, PROGRESS_DROP, __LINE__);
			return (1);
		}
	}
	return (0);
}

static void
rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
{
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.flex1 = TICKS_2_MSEC(rack->rc_tp->t_srtt >> TCP_RTT_SHIFT);
		log.u_bbr.flex2 = to;
		log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
		log.u_bbr.flex4 = slot;
		log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot;
		log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
		log.u_bbr.flex8 = which;
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		TCP_LOG_EVENT(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_TIMERSTAR, 0,
		    0, &log, false);
	}
}
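
/*
 * The rack_log_* helpers below all follow the same pattern as above:
 * pack stack-specific values into the flex fields of a
 * tcp_log_stackspecific record and emit one BBR_LOG_* event into the
 * connection's black-box log whenever logging is enabled on the tcpcb.
 */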

static void
rack_log_to_event(struct tcp_rack *rack, int32_t to_num)
{
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		log.u_bbr.flex8 = to_num;
		log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt;
		log.u_bbr.flex2 = rack->rc_rack_rtt;
		TCP_LOG_EVENT(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_RTO, 0,
		    0, &log, false);
	}
}

static void
rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, int32_t t,
    uint32_t o_srtt, uint32_t o_var)
{
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		log.u_bbr.flex1 = t;
		log.u_bbr.flex2 = o_srtt;
		log.u_bbr.flex3 = o_var;
		log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest;
		log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest;
		log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_rtt_cnt;
		log.u_bbr.rttProp = rack->r_ctl.rack_rs.rs_rtt_tot;
		log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method;
		TCP_LOG_EVENT(tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_BBRRTT, 0,
		    0, &log, false);
	}
}

static void
rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt)
{
	/*
	 * Log the rtt sample we are
	 * applying to the srtt algorithm in
	 * useconds.
	 */
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		/* Convert our ms to a microsecond */
		log.u_bbr.flex1 = rtt * 1000;
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		TCP_LOG_EVENTP(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    TCP_LOG_RTT, 0,
		    0, &log, false, &tv);
	}
}

static void
rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line)
{
	if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
		union tcp_log_stackspecific log;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		log.u_bbr.flex1 = line;
		log.u_bbr.flex2 = tick;
		log.u_bbr.flex3 = tp->t_maxunacktime;
		log.u_bbr.flex4 = tp->t_acktime;
		log.u_bbr.flex8 = event;
		TCP_LOG_EVENT(tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_PROGRESS, 0,
		    0, &log, false);
	}
}

static void
rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts)
{
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		log.u_bbr.flex1 = slot;
		log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags);
		log.u_bbr.flex8 = rack->rc_in_persist;
		TCP_LOG_EVENT(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_BBRSND, 0,
		    0, &log, false);
	}
}

static void
rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out)
{
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;

		log.u_bbr.flex1 = did_out;
		log.u_bbr.flex2 = nxt_pkt;
		log.u_bbr.flex3 = way_out;
		log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
		log.u_bbr.flex7 = rack->r_wanted_output;
		log.u_bbr.flex8 = rack->rc_in_persist;
		TCP_LOG_EVENT(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_DOSEG_DONE, 0,
		    0, &log, false);
	}
}

static void
rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, uint8_t hpts_calling)
{
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		log.u_bbr.flex1 = slot;
		log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
		log.u_bbr.flex7 = hpts_calling;
		log.u_bbr.flex8 = rack->rc_in_persist;
		TCP_LOG_EVENT(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_JUSTRET, 0,
		    tlen, &log, false);
	}
}

static void
rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line)
{
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		log.u_bbr.flex1 = line;
		log.u_bbr.flex2 = 0;
		log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
		log.u_bbr.flex4 = 0;
		log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
		log.u_bbr.flex8 = hpts_removed;
		TCP_LOG_EVENT(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_TIMERCANC, 0,
		    0, &log, false);
	}
}

static void
rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers)
{
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.flex1 = timers;
		log.u_bbr.flex2 = ret;
		log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp;
		log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
		log.u_bbr.flex5 = cts;
		TCP_LOG_EVENT(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_TO_PROCESS, 0,
		    0, &log, false);
	}
}

static void
rack_counter_destroy(void)
{
	counter_u64_free(rack_badfr);
	counter_u64_free(rack_badfr_bytes);
	counter_u64_free(rack_rtm_prr_retran);
	counter_u64_free(rack_rtm_prr_newdata);
	counter_u64_free(rack_timestamp_mismatch);
	counter_u64_free(rack_reorder_seen);
	counter_u64_free(rack_tlp_tot);
	counter_u64_free(rack_tlp_newdata);
	counter_u64_free(rack_tlp_retran);
	counter_u64_free(rack_tlp_retran_bytes);
	counter_u64_free(rack_tlp_retran_fail);
	counter_u64_free(rack_to_tot);
	counter_u64_free(rack_to_arm_rack);
	counter_u64_free(rack_to_arm_tlp);
	counter_u64_free(rack_paced_segments);
	counter_u64_free(rack_unpaced_segments);
	counter_u64_free(rack_saw_enobuf);
	counter_u64_free(rack_saw_enetunreach);
	counter_u64_free(rack_to_alloc_hard);
	counter_u64_free(rack_to_alloc_emerg);
	counter_u64_free(rack_sack_proc_all);
	counter_u64_free(rack_sack_proc_short);
	counter_u64_free(rack_sack_proc_restart);
	counter_u64_free(rack_to_alloc);
	counter_u64_free(rack_find_high);
	counter_u64_free(rack_runt_sacks);
	counter_u64_free(rack_enter_tlp_calc);
	counter_u64_free(rack_used_tlpmethod);
	counter_u64_free(rack_used_tlpmethod2);
	counter_u64_free(rack_progress_drops);
	counter_u64_free(rack_input_idle_reduces);
	counter_u64_free(rack_tlp_does_nada);
	COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE);
	COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE);
}

static struct rack_sendmap *
rack_alloc(struct tcp_rack *rack)
{
	struct rack_sendmap *rsm;

	counter_u64_add(rack_to_alloc, 1);
	rack->r_ctl.rc_num_maps_alloced++;
	rsm = uma_zalloc(rack_zone, M_NOWAIT);
	if (rsm) {
		return (rsm);
	}
	if (rack->rc_free_cnt) {
		counter_u64_add(rack_to_alloc_emerg, 1);
		rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
		TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_next);
		rack->rc_free_cnt--;
		return (rsm);
	}
	return (NULL);
}

static void
rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
{
	rack->r_ctl.rc_num_maps_alloced--;
	if (rack->r_ctl.rc_tlpsend == rsm)
		rack->r_ctl.rc_tlpsend = NULL;
	if (rack->r_ctl.rc_next == rsm)
		rack->r_ctl.rc_next = NULL;
	if (rack->r_ctl.rc_sacklast == rsm)
		rack->r_ctl.rc_sacklast = NULL;
	if (rack->rc_free_cnt < rack_free_cache) {
		memset(rsm, 0, sizeof(struct rack_sendmap));
		TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_next);
		rack->rc_free_cnt++;
	} else {
		uma_zfree(rack_zone, rsm);
	}
}
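
/*
 * Taken together, rack_alloc()/rack_free() form a small two-tier
 * allocator: sendmap entries normally come from rack_zone, but each
 * connection parks up to rack_free_cache (2) freed entries on rc_free
 * as an emergency reserve, so a map entry can still be produced
 * (counted in rack_to_alloc_emerg) when the M_NOWAIT zone allocation
 * fails under memory pressure.
 */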

/*
 * CC wrapper hook functions
 */
static void
rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, struct tcphdr *th, uint16_t nsegs,
    uint16_t type, int32_t recovery)
{
#ifdef NETFLIX_STATS
	int32_t gput;
#endif
#ifdef NETFLIX_CWV
	u_long old_cwnd = tp->snd_cwnd;
#endif

	INP_WLOCK_ASSERT(tp->t_inpcb);
	tp->ccv->nsegs = nsegs;
	tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
	if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) {
		uint32_t max;

		max = rack->r_ctl.rc_early_recovery_segs * tp->t_maxseg;
		if (tp->ccv->bytes_this_ack > max) {
			tp->ccv->bytes_this_ack = max;
		}
	}
	if (tp->snd_cwnd <= tp->snd_wnd)
		tp->ccv->flags |= CCF_CWND_LIMITED;
	else
		tp->ccv->flags &= ~CCF_CWND_LIMITED;

	if (type == CC_ACK) {
#ifdef NETFLIX_STATS
		stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
		    ((int32_t) tp->snd_cwnd) - tp->snd_wnd);
		if ((tp->t_flags & TF_GPUTINPROG) &&
		    SEQ_GEQ(th->th_ack, tp->gput_ack)) {
			gput = (((int64_t) (th->th_ack - tp->gput_seq)) << 3) /
			    max(1, tcp_ts_getticks() - tp->gput_ts);
			stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
			    gput);
			/*
			 * XXXLAS: This is a temporary hack, and should be
			 * chained off VOI_TCP_GPUT when stats(9) grows an
			 * API to deal with chained VOIs.
			 */
			if (tp->t_stats_gput_prev > 0)
				stats_voi_update_abs_s32(tp->t_stats,
				    VOI_TCP_GPUT_ND,
				    ((gput - tp->t_stats_gput_prev) * 100) /
				    tp->t_stats_gput_prev);
			tp->t_flags &= ~TF_GPUTINPROG;
			tp->t_stats_gput_prev = gput;
		}
		if (tp->t_maxpeakrate) {
			/*
			 * We update t_peakrate_thr. This gives us roughly
			 * one update per round trip time.
			 */
			tcp_update_peakrate_thr(tp);
		}
#endif
		if (tp->snd_cwnd > tp->snd_ssthresh) {
			tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
			    nsegs * V_tcp_abc_l_var * tp->t_maxseg);
			if (tp->t_bytes_acked >= tp->snd_cwnd) {
				tp->t_bytes_acked -= tp->snd_cwnd;
				tp->ccv->flags |= CCF_ABC_SENTAWND;
			}
		} else {
			tp->ccv->flags &= ~CCF_ABC_SENTAWND;
			tp->t_bytes_acked = 0;
		}
	}
	if (CC_ALGO(tp)->ack_received != NULL) {
		/* XXXLAS: Find a way to live without this */
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->ack_received(tp->ccv, type);
	}
#ifdef NETFLIX_STATS
	stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, tp->snd_cwnd);
#endif
	if (rack->r_ctl.rc_rack_largest_cwnd < tp->snd_cwnd) {
		rack->r_ctl.rc_rack_largest_cwnd = tp->snd_cwnd;
	}
#ifdef NETFLIX_CWV
	if (tp->cwv_enabled) {
		/*
		 * Per RFC 7661: The behaviour in the non-validated phase is
		 * specified as: o A sender determines whether to increase
		 * the cwnd based upon whether it is cwnd-limited (see
		 * Section 4.5.3): * A sender that is cwnd-limited MAY use
		 * the standard TCP method to increase cwnd (i.e., the
		 * standard method permits a TCP sender that fully utilises
		 * the cwnd to increase the cwnd each time it receives an
		 * ACK). * A sender that is not cwnd-limited MUST NOT
		 * increase the cwnd when ACK packets are received in this
		 * phase (i.e., needs to avoid growing the cwnd when it has
		 * not recently sent using the current size of cwnd).
		 */
		if ((tp->snd_cwnd > old_cwnd) &&
		    (tp->cwv_cwnd_valid == 0) &&
		    (!(tp->ccv->flags & CCF_CWND_LIMITED))) {
			tp->snd_cwnd = old_cwnd;
		}
		/* Try to update pipeAck and NCWV state */
		if (TCPS_HAVEESTABLISHED(tp->t_state) &&
		    !IN_RECOVERY(tp->t_flags)) {
			uint32_t data = sbavail(&(tp->t_inpcb->inp_socket->so_snd));

			tcp_newcwv_update_pipeack(tp, data);
		}
	}
#endif
	/* we enforce max peak rate if it is set. */
	if (tp->t_peakrate_thr && tp->snd_cwnd > tp->t_peakrate_thr) {
		tp->snd_cwnd = tp->t_peakrate_thr;
	}
}
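
/*
 * Note on the t_bytes_acked logic above: it is the standard Appropriate
 * Byte Counting scheme (RFC 3465); V_tcp_abc_l_var bounds how many
 * segments' worth of credit a single stretch ACK may earn, and
 * CCF_ABC_SENTAWND tells the cc module a full window has been acked.
 */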

static void
tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th)
{
	struct tcp_rack *rack;

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	INP_WLOCK_ASSERT(tp->t_inpcb);
	if (rack->r_ctl.rc_prr_sndcnt > 0)
		rack->r_wanted_output++;
}

static void
rack_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
	struct tcp_rack *rack;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (CC_ALGO(tp)->post_recovery != NULL) {
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->post_recovery(tp->ccv);
	}
	/*
	 * Here we can in theory adjust cwnd to be based on the number of
	 * losses in the window (rack->r_ctl.rc_loss_count). This is done
	 * based on the rack_use_proportional flag.
	 */
	if (rack->r_ctl.rc_prop_reduce && rack->r_ctl.rc_prop_rate) {
		int32_t reduce;

		reduce = (rack->r_ctl.rc_loss_count * rack->r_ctl.rc_prop_rate);
		if (reduce > 50) {
			reduce = 50;
		}
		tp->snd_cwnd -= ((reduce * tp->snd_cwnd) / 100);
	} else {
		if (tp->snd_cwnd > tp->snd_ssthresh) {
			/* Drop us down to the ssthresh (1/2 cwnd at loss) */
			tp->snd_cwnd = tp->snd_ssthresh;
		}
	}
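	/*
	 * Worked example of the proportional branch above (illustrative):
	 * with rc_loss_count = 3 and rc_prop_rate = 10 (the default of
	 * rack_proportional_rate), reduce is 30, so cwnd is cut by 30%
	 * rather than dropped to ssthresh.
	 */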
	if (rack->r_ctl.rc_prr_sndcnt > 0) {
		/* Suck the next prr cnt back into cwnd */
		tp->snd_cwnd += rack->r_ctl.rc_prr_sndcnt;
		rack->r_ctl.rc_prr_sndcnt = 0;
	}
	EXIT_RECOVERY(tp->t_flags);

#ifdef NETFLIX_CWV
	if (tp->cwv_enabled) {
		if ((tp->cwv_cwnd_valid == 0) &&
		    (tp->snd_cwv.in_recovery))
			tcp_newcwv_end_recovery(tp);
	}
#endif
}

static void
rack_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
{
	struct tcp_rack *rack;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	switch (type) {
	case CC_NDUPACK:
		/* rack->r_ctl.rc_ssthresh_set = 1;*/
		if (!IN_FASTRECOVERY(tp->t_flags)) {
			rack->r_ctl.rc_tlp_rtx_out = 0;
			rack->r_ctl.rc_prr_delivered = 0;
			rack->r_ctl.rc_prr_out = 0;
			rack->r_ctl.rc_loss_count = 0;
			rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
			rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una;
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_ECN:
		if (!IN_CONGRECOVERY(tp->t_flags)) {
			TCPSTAT_INC(tcps_ecn_rcwnd);
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_RTO:
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		EXIT_RECOVERY(tp->t_flags);
		tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
		    tp->t_maxseg) * tp->t_maxseg;
		tp->snd_cwnd = tp->t_maxseg;
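		/*
		 * E.g. (illustrative): with cwnd = 20 * MSS and
		 * wnd >= cwnd, the line above gives ssthresh =
		 * max(2, 10) * MSS = 10 * MSS, while cwnd itself
		 * restarts from a single segment.
		 */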
		break;
	case CC_RTO_ERR:
		TCPSTAT_INC(tcps_sndrexmitbad);
		/* RTO was unnecessary, so reset everything. */
		tp->snd_cwnd = tp->snd_cwnd_prev;
		tp->snd_ssthresh = tp->snd_ssthresh_prev;
		tp->snd_recover = tp->snd_recover_prev;
		if (tp->t_flags & TF_WASFRECOVERY)
			ENTER_FASTRECOVERY(tp->t_flags);
		if (tp->t_flags & TF_WASCRECOVERY)
			ENTER_CONGRECOVERY(tp->t_flags);
		tp->snd_nxt = tp->snd_max;
		tp->t_badrxtwin = 0;
		break;
	}

	if (CC_ALGO(tp)->cong_signal != NULL) {
		if (th != NULL)
			tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->cong_signal(tp->ccv, type);
	}
#ifdef NETFLIX_CWV
	if (tp->cwv_enabled) {
		if (tp->snd_cwv.in_recovery == 0 && IN_RECOVERY(tp->t_flags)) {
			tcp_newcwv_enter_recovery(tp);
		}
		if (type == CC_RTO) {
			tcp_newcwv_reset(tp);
		}
	}
#endif
}

static void
rack_cc_after_idle(struct tcpcb *tp, int reduce_largest)
{
	uint32_t i_cwnd;

	INP_WLOCK_ASSERT(tp->t_inpcb);

#ifdef NETFLIX_STATS
	TCPSTAT_INC(tcps_idle_restarts);
	if (tp->t_state == TCPS_ESTABLISHED)
		TCPSTAT_INC(tcps_idle_estrestarts);
#endif
	if (CC_ALGO(tp)->after_idle != NULL)
		CC_ALGO(tp)->after_idle(tp->ccv);

	if (tp->snd_cwnd == 1)
		i_cwnd = tp->t_maxseg;	/* SYN(-ACK) lost */
	else if (V_tcp_initcwnd_segments)
		i_cwnd = min((V_tcp_initcwnd_segments * tp->t_maxseg),
		    max(2 * tp->t_maxseg, V_tcp_initcwnd_segments * 1460));
	else if (V_tcp_do_rfc3390)
		i_cwnd = min(4 * tp->t_maxseg,
		    max(2 * tp->t_maxseg, 4380));
	else {
		/* Per RFC5681 Section 3.1 */
		if (tp->t_maxseg > 2190)
			i_cwnd = 2 * tp->t_maxseg;
		else if (tp->t_maxseg > 1095)
			i_cwnd = 3 * tp->t_maxseg;
		else
			i_cwnd = 4 * tp->t_maxseg;
	}
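	/*
	 * E.g. for the RFC 5681 ladder above: a 1448-byte MSS falls in
	 * the middle bucket (1095 < 1448 <= 2190), so
	 * i_cwnd = 3 * 1448 = 4344 bytes.
	 */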
	if (reduce_largest) {
		/*
		 * Do we reduce the largest cwnd to make
		 * rack play nice on restart hptsi wise?
		 */
		if (((struct tcp_rack *)tp->t_fb_ptr)->r_ctl.rc_rack_largest_cwnd > i_cwnd)
			((struct tcp_rack *)tp->t_fb_ptr)->r_ctl.rc_rack_largest_cwnd = i_cwnd;
	}
	/*
	 * Being idle is no different than the initial window. If the cc
	 * clamps it down below the initial window raise it to the initial
	 * window.
	 */
	if (tp->snd_cwnd < i_cwnd) {
		tp->snd_cwnd = i_cwnd;
	}
}

/*
 * Indicate whether this ack should be delayed. We can delay the ack if
 * following conditions are met:
 *	- There is no delayed ack timer in progress.
 *	- Our last ack wasn't a 0-sized window. We never want to delay
 *	  the ack that opens up a 0-sized window.
 *	- LRO wasn't used for this segment. We make sure by checking that the
 *	  segment size is not larger than the MSS.
 *	- Delayed acks are enabled or this is a half-synchronized T/TCP
 *	  connection.
 */
#define DELAY_ACK(tp, tlen)			 \
	(((tp->t_flags & TF_RXWIN0SENT) == 0) && \
	((tp->t_flags & TF_DELACK) == 0) &&	 \
	(tlen <= tp->t_maxseg) &&		 \
	(tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))

static void
rack_calc_rwin(struct socket *so, struct tcpcb *tp)
{
	int32_t win;

	/*
	 * Calculate amount of space in receive window, and then do TCP
	 * input processing. Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	win = sbspace(&so->so_rcv);
	if (win < 0)
		win = 0;
	tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
}

static void
rack_do_drop(struct mbuf *m, struct tcpcb *tp, int32_t * ti_locked)
{
	if (*ti_locked == TI_RLOCKED) {
		INP_INFO_RUNLOCK(&V_tcbinfo);
		*ti_locked = TI_UNLOCKED;
	}
	/*
	 * Drop space held by incoming segment and return.
	 */
	if (tp != NULL)
		INP_WUNLOCK(tp->t_inpcb);
	if (m)
		m_freem(m);
}

static void
rack_do_dropwithreset(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th, int32_t * ti_locked, int32_t rstreason, int32_t tlen)
{
	if (*ti_locked == TI_RLOCKED) {
		INP_INFO_RUNLOCK(&V_tcbinfo);
		*ti_locked = TI_UNLOCKED;
	}
	if (tp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(tp->t_inpcb);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
}

/*
 * The value in ret_val informs the caller
 * if we dropped the tcb (and lock) or not.
 * 1 = we dropped it, 0 = the TCB is still locked
 * and valid.
 */
static void
rack_do_dropafterack(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th, int32_t * ti_locked, int32_t thflags, int32_t tlen, int32_t * ret_val)
{
	/*
	 * Generate an ACK dropping incoming segment if it occupies sequence
	 * space, where the ACK reflects our state.
	 *
	 * We can now skip the test for the RST flag since all paths to this
	 * code happen after packets containing RST have been dropped.
	 *
	 * In the SYN-RECEIVED state, don't send an ACK unless the segment
	 * we received passes the SYN-RECEIVED ACK test. If it fails send a
	 * RST. This breaks the loop in the "LAND" DoS attack, and also
	 * prevents an ACK storm between two listening ports that have been
	 * sent forged SYN segments, each with the source address of the
	 * other.
	 */
	struct tcp_rack *rack;

	if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
	    (SEQ_GT(tp->snd_una, th->th_ack) ||
	    SEQ_GT(th->th_ack, tp->snd_max))) {
		*ret_val = 1;
		rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
		return;
	} else
		*ret_val = 0;
	if (*ti_locked == TI_RLOCKED) {
		INP_INFO_RUNLOCK(&V_tcbinfo);
		*ti_locked = TI_UNLOCKED;
	}
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	rack->r_wanted_output++;
	tp->t_flags |= TF_ACKNOW;
	if (m)
		m_freem(m);
}

static int32_t
rack_process_rst(struct mbuf *m, struct tcphdr *th, struct socket *so, struct tcpcb *tp, int32_t * ti_locked)
{
	int dropped = 0;

	/*
	 * RFC5961 Section 3.2
	 *
	 * - RST drops connection only if SEG.SEQ == RCV.NXT. - If RST is in
	 * window, we send challenge ACK.
	 *
	 * Note: to take into account delayed ACKs, we should test against
	 * last_ack_sent instead of rcv_nxt. Note 2: we handle special case
	 * of closed window, not covered by the RFC.
	 */
	if ((SEQ_GEQ(th->th_seq, (tp->last_ack_sent - 1)) &&
	    SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
	    (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) {

		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
		KASSERT(*ti_locked == TI_RLOCKED,
		    ("%s: TH_RST ti_locked %d, th %p tp %p",
		    __func__, *ti_locked, th, tp));
		KASSERT(tp->t_state != TCPS_SYN_SENT,
		    ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p",
		    __func__, th, tp));

		if (V_tcp_insecure_rst ||
		    (tp->last_ack_sent == th->th_seq) ||
		    (tp->rcv_nxt == th->th_seq) ||
		    ((tp->last_ack_sent - 1) == th->th_seq)) {
			TCPSTAT_INC(tcps_drops);
			/* Drop the connection. */
			switch (tp->t_state) {
			case TCPS_SYN_RECEIVED:
				so->so_error = ECONNREFUSED;
				goto close;
			case TCPS_ESTABLISHED:
			case TCPS_FIN_WAIT_1:
			case TCPS_FIN_WAIT_2:
			case TCPS_CLOSE_WAIT:
			case TCPS_CLOSING:
			case TCPS_LAST_ACK:
				so->so_error = ECONNRESET;
		close:
				tcp_state_change(tp, TCPS_CLOSED);
				/* FALLTHROUGH */
			default:
				tp = tcp_close(tp);
			}
			dropped = 1;
			rack_do_drop(m, tp, ti_locked);
		} else {
			TCPSTAT_INC(tcps_badrst);
			/* Send challenge ACK. */
			tcp_respond(tp, mtod(m, void *), th, m,
			    tp->rcv_nxt, tp->snd_nxt, TH_ACK);
			tp->last_ack_sent = tp->rcv_nxt;
		}
	}
	return (dropped);
}

/*
 * The value in ret_val informs the caller
 * if we dropped the tcb (and lock) or not.
 * 1 = we dropped it, 0 = the TCB is still locked
 * and valid.
 */
static void
rack_challenge_ack(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t * ti_locked, int32_t * ret_val)
{
	KASSERT(*ti_locked == TI_RLOCKED,
	    ("tcp_do_segment: TH_SYN ti_locked %d", *ti_locked));
	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);

	TCPSTAT_INC(tcps_badsyn);
	if (V_tcp_insecure_syn &&
	    SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
	    SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
		tp = tcp_drop(tp, ECONNRESET);
		*ret_val = 1;
		rack_do_drop(m, tp, ti_locked);
	} else {
		/* Send challenge ACK. */
		tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt,
		    tp->snd_nxt, TH_ACK);
		tp->last_ack_sent = tp->rcv_nxt;
		*ret_val = 0;
		rack_do_drop(m, NULL, ti_locked);
	}
}

/*
 * rack_ts_check returns 1 for you should not proceed. It places
 * in ret_val what should be returned 1/0 by the caller. The 1 indicates
 * that the TCB is unlocked and probably dropped. The 0 indicates the
 * TCB is still valid and locked.
 */
static int32_t
rack_ts_check(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t * ti_locked, int32_t tlen, int32_t thflags, int32_t * ret_val)
{
	/* Check to see if ts_recent is over 24 days old. */
	if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
		/*
		 * Invalidate ts_recent. If this segment updates ts_recent,
		 * the age will be reset later and ts_recent will get a
		 * valid value. If it does not, setting ts_recent to zero
		 * will at least satisfy the requirement that zero be placed
		 * in the timestamp echo reply when ts_recent isn't valid.
		 * The age isn't reset until we get a valid ts_recent
		 * because we don't want out-of-order segments to be dropped
		 * when ts_recent is old.
		 */
		tp->ts_recent = 0;
	} else {
		TCPSTAT_INC(tcps_rcvduppack);
		TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
		TCPSTAT_INC(tcps_pawsdrop);
		*ret_val = 0;
		if (tlen) {
			rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, ret_val);
		} else {
			rack_do_drop(m, NULL, ti_locked);
		}
		return (1);
	}
	return (0);
}
1703 * rack_drop_checks returns 1 for you should not proceed. It places
1704 * in ret_val what should be returned 1/0 by the caller. The 1 indicates
1705 * that the TCB is unlocked and probably dropped. The 0 indicates the
1706 * TCB is still valid and locked.
1709 rack_drop_checks(struct tcpopt *to, struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t * tlenp, int32_t * ti_locked, int32_t * thf, int32_t * drop_hdrlen, int32_t * ret_val)
1717 todrop = tp->rcv_nxt - th->th_seq;
1719 if (thflags & TH_SYN) {
1729 * Following if statement from Stevens, vol. 2, p. 960.
1732 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
1734 * Any valid FIN must be to the left of the window.
1735 * At this point the FIN must be a duplicate or out
1736 * of sequence; drop it.
1740 * Send an ACK to resynchronize and drop any data.
1741 * But keep on processing for RST or ACK.
1743 tp->t_flags |= TF_ACKNOW;
1745 TCPSTAT_INC(tcps_rcvduppack);
1746 TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
1748 TCPSTAT_INC(tcps_rcvpartduppack);
1749 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
1751 *drop_hdrlen += todrop; /* drop from the top afterwards */
1752 th->th_seq += todrop;
1754 if (th->th_urp > todrop)
1755 th->th_urp -= todrop;
1762 * If segment ends after window, drop trailing data (and PUSH and
1763 * FIN); if nothing left, just ACK.
1765 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
1767 TCPSTAT_INC(tcps_rcvpackafterwin);
1768 if (todrop >= tlen) {
1769 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
1771 * If window is closed can only take segments at
1772 * window edge, and have to drop data and PUSH from
1773 * incoming segments. Continue processing, but
1774 * remember to ack. Otherwise, drop segment and ack.
1777 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
1778 tp->t_flags |= TF_ACKNOW;
1779 TCPSTAT_INC(tcps_rcvwinprobe);
1781 rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, ret_val);
1785 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
1788 thflags &= ~(TH_PUSH | TH_FIN);
1795 static struct rack_sendmap *
1796 rack_find_lowest_rsm(struct tcp_rack *rack)
1798 struct rack_sendmap *rsm;
1801 * Walk the time-order transmitted list looking for an rsm that is
1802 * not acked. This will be the one that was sent the longest time
1803 * ago that is still outstanding.
1805 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
1806 if (rsm->r_flags & RACK_ACKED) {
1815 static struct rack_sendmap *
1816 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
1818 struct rack_sendmap *prsm;
1821 * Walk the sequence-ordered list backward until we arrive at
1822 * the highest seq not acked. In theory, when this is called it
1823 * should be the last segment (which it was not).
1825 counter_u64_add(rack_find_high, 1);
1827 TAILQ_FOREACH_REVERSE_FROM(prsm, &rack->r_ctl.rc_map, rack_head, r_next) {
1828 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
1838 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
1844 * lro is the flag we use to determine if we have seen reordering.
1845 * If it gets set we have seen reordering. The reorder logic works
1846 * in one of two ways:
1848 * If reorder-fade is configured, then we track the last time we saw
1849 * re-ordering occur. If we reach the point where enough time has
1850 * passed we no longer consider reordering to be occurring.
1852 * Or if reorder-fade is 0, then once we see reordering we consider
1853 * the connection to always be subject to reordering and just set lro
1856 * In the end if lro is non-zero we add the extra time for reordering.
1861 if (rack->r_ctl.rc_reorder_ts) {
1862 if (rack->r_ctl.rc_reorder_fade) {
1863 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
1864 lro = cts - rack->r_ctl.rc_reorder_ts;
1867 * No time has passed since the last
1868 * reorder; mark it as reordering.
1873 /* Negative time? */
1876 if (lro > rack->r_ctl.rc_reorder_fade) {
1877 /* Turn off reordering seen too */
1878 rack->r_ctl.rc_reorder_ts = 0;
1882 /* Reordering does not fade */
1888 thresh = srtt + rack->r_ctl.rc_pkt_delay;
1890 /* It must be set, if not you get 1/4 rtt */
1891 if (rack->r_ctl.rc_reorder_shift)
1892 thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
1894 thresh += (srtt >> 2);
1898 /* We don't let the rack timeout be above an RTO */
1900 if (thresh > TICKS_2_MSEC(rack->rc_tp->t_rxtcur)) {
1901 thresh = TICKS_2_MSEC(rack->rc_tp->t_rxtcur);
1903 /* And we don't want it above the RTO max either */
1904 if (thresh > rack_rto_max) {
1905 thresh = rack_rto_max;
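/*
 * A rough worked example of the threshold above, with assumed values
 * (not defaults from this file): srtt = 40 ms, rc_pkt_delay = 1 ms,
 * reordering currently seen, rc_reorder_shift = 2:
 *
 *   thresh = 40 + 1 + (40 >> 2) = 51 ms
 *
 * which is then clamped so it exceeds neither the current RTO
 * (TICKS_2_MSEC(t_rxtcur)) nor rack_rto_max.
 */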
1911 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
1912 struct rack_sendmap *rsm, uint32_t srtt)
1914 struct rack_sendmap *prsm;
1915 uint32_t thresh, len;
1920 if (rack->r_ctl.rc_tlp_threshold)
1921 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold);
1923 thresh = (srtt * 2);
1925 /* Get the previous sent packet, if any */
1926 maxseg = tcp_maxseg(tp);
1927 counter_u64_add(rack_enter_tlp_calc, 1);
1928 len = rsm->r_end - rsm->r_start;
1929 if (rack->rack_tlp_threshold_use == TLP_USE_ID) {
1930 /* Exactly like the ID */
1931 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= maxseg) {
1932 uint32_t alt_thresh;
1934 * Compensate for delayed-ack with the d-ack time.
1936 counter_u64_add(rack_used_tlpmethod, 1);
1937 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
1938 if (alt_thresh > thresh)
1939 thresh = alt_thresh;
1941 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) {
1943 prsm = TAILQ_PREV(rsm, rack_head, r_tnext);
1944 if (prsm && (len <= maxseg)) {
1946 * Two packets outstanding, thresh should be (2*srtt) +
1947 * possible inter-packet delay (if any).
1949 uint32_t inter_gap = 0;
1952 counter_u64_add(rack_used_tlpmethod, 1);
1953 idx = rsm->r_rtr_cnt - 1;
1954 nidx = prsm->r_rtr_cnt - 1;
1955 if (TSTMP_GEQ(rsm->r_tim_lastsent[nidx], prsm->r_tim_lastsent[idx])) {
1956 /* Yes it was sent later (or at the same time) */
1957 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx];
1959 thresh += inter_gap;
1960 } else if (len <= maxseg) {
1962 * Possibly compensate for delayed-ack.
1964 uint32_t alt_thresh;
1966 counter_u64_add(rack_used_tlpmethod2, 1);
1967 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
1968 if (alt_thresh > thresh)
1969 thresh = alt_thresh;
1971 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) {
1973 if (len <= maxseg) {
1974 uint32_t alt_thresh;
1976 * Compensate for delayed-ack with the d-ack time.
1978 counter_u64_add(rack_used_tlpmethod, 1);
1979 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
1980 if (alt_thresh > thresh)
1981 thresh = alt_thresh;
1984 /* Not above an RTO */
1985 if (thresh > TICKS_2_MSEC(tp->t_rxtcur)) {
1986 thresh = TICKS_2_MSEC(tp->t_rxtcur);
1988 /* Not above an RTO max */
1989 if (thresh > rack_rto_max) {
1990 thresh = rack_rto_max;
1992 /* Apply user supplied min TLP */
1993 if (thresh < rack_tlp_min) {
1994 thresh = rack_tlp_min;
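/*
 * A rough illustration with assumed values: if rc_tlp_threshold = 8
 * and srtt = 40 ms, the base threshold is 40 + 40/8 = 45 ms.  With a
 * single small segment outstanding (the TLP_USE_ID case) and a
 * delayed-ack allowance of, say, 200 ms, the alternate threshold
 * 40 + 20 + 200 = 260 ms wins.  The result is then capped at the
 * current RTO and rack_rto_max, and floored at rack_tlp_min.
 */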
1999 static struct rack_sendmap *
2000 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused)
2003 * Check to see that we don't need to fall into recovery. We will
2004 * need to do so if our oldest transmit is past the time we should have retransmitted.
2007 struct tcp_rack *rack;
2008 struct rack_sendmap *rsm;
2010 uint32_t srtt_cur, srtt, thresh;
2012 rack = (struct tcp_rack *)tp->t_fb_ptr;
2013 if (TAILQ_EMPTY(&rack->r_ctl.rc_map)) {
2016 srtt_cur = tp->t_srtt >> TCP_RTT_SHIFT;
2017 srtt = TICKS_2_MSEC(srtt_cur);
2018 if (rack->rc_rack_rtt && (srtt > rack->rc_rack_rtt))
2019 srtt = rack->rc_rack_rtt;
2021 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
2025 if (rsm->r_flags & RACK_ACKED) {
2026 rsm = rack_find_lowest_rsm(rack);
2030 idx = rsm->r_rtr_cnt - 1;
2031 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
2032 if (tsused < rsm->r_tim_lastsent[idx]) {
2035 if ((tsused - rsm->r_tim_lastsent[idx]) < thresh) {
2038 /* Ok if we reach here we are overdue */
2039 rack->r_ctl.rc_rsm_start = rsm->r_start;
2040 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
2041 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
2042 rack_cong_signal(tp, NULL, CC_NDUPACK);
2047 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack)
2053 t = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT) + ((tp->t_rttvar * 4) >> TCP_RTT_SHIFT));
2054 TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
2055 tcp_persmin, tcp_persmax);
2056 if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
2058 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT;
2059 ret_val = (uint32_t)tt;
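/*
 * Sketch of the persist backoff with assumed numbers: if
 * srtt + 4 * rttvar works out to t = 500 ms and t_rxtshift = 3,
 * then tt = 500 * tcp_backoff[3] = 4000 ms with the stock backoff
 * table, bounded into [tcp_persmin, tcp_persmax].  Each expiry bumps
 * t_rxtshift (up to TCP_MAXRXTSHIFT), lengthening the next probe.
 */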
2064 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2067 * Start the FR timer, we do this based on getting the first one in
2068 * the rc_tmap. Note that if it's NULL we must stop the timer. In all
2069 * events we need to stop the running timer (if it's running) before
2070 * starting the new one.
2072 uint32_t thresh, exp, to, srtt, time_since_sent;
2075 int32_t is_tlp_timer = 0;
2076 struct rack_sendmap *rsm;
2078 if (rack->t_timers_stopped) {
2079 /* All timers have been stopped; none are to run */
2082 if (rack->rc_in_persist) {
2083 /* We can't start any timer in persists */
2084 return (rack_get_persists_timer_val(tp, rack));
2086 if (tp->t_state < TCPS_ESTABLISHED)
2088 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
2090 /* Nothing on the send map */
2092 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) {
2093 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT;
2094 to = TICKS_2_MSEC(tp->t_rxtcur);
2101 if (rsm->r_flags & RACK_ACKED) {
2102 rsm = rack_find_lowest_rsm(rack);
2108 /* Convert from ms to usecs */
2109 if (rsm->r_flags & RACK_SACK_PASSED) {
2110 if ((tp->t_flags & TF_SENTFIN) &&
2111 ((tp->snd_max - tp->snd_una) == 1) &&
2112 (rsm->r_flags & RACK_HAS_FIN)) {
2114 * We don't start a rack timer if all we have is a FIN outstanding.
2120 srtt_cur = (tp->t_srtt >> TCP_RTT_SHIFT);
2121 srtt = TICKS_2_MSEC(srtt_cur);
2123 srtt = RACK_INITIAL_RTO;
2125 thresh = rack_calc_thresh_rack(rack, srtt, cts);
2126 idx = rsm->r_rtr_cnt - 1;
2127 exp = rsm->r_tim_lastsent[idx] + thresh;
2128 if (SEQ_GEQ(exp, cts)) {
2130 if (to < rack->r_ctl.rc_min_to) {
2131 to = rack->r_ctl.rc_min_to;
2134 to = rack->r_ctl.rc_min_to;
2137 /* Ok we need to do a TLP not RACK */
2138 if ((rack->rc_tlp_in_progress != 0) ||
2139 (rack->r_ctl.rc_tlp_rtx_out != 0)) {
2141 * The previous send was a TLP, or a tlp_rtx is in progress.
2146 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
2148 /* We found no rsm to TLP with. */
2151 if (rsm->r_flags & RACK_HAS_FIN) {
2152 /* If it's a FIN we don't do TLP */
2156 idx = rsm->r_rtr_cnt - 1;
2157 if (TSTMP_GT(cts, rsm->r_tim_lastsent[idx]))
2158 time_since_sent = cts - rsm->r_tim_lastsent[idx];
2160 time_since_sent = 0;
2163 srtt_cur = (tp->t_srtt >> TCP_RTT_SHIFT);
2164 srtt = TICKS_2_MSEC(srtt_cur);
2166 srtt = RACK_INITIAL_RTO;
2167 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt);
2168 if (thresh > time_since_sent)
2169 to = thresh - time_since_sent;
2171 to = rack->r_ctl.rc_min_to;
2172 if (to > TCPTV_REXMTMAX) {
2174 * If the TLP time works out to be larger than the max
2175 * RTO, let's not do TLP; just RTO.
2179 if (rsm->r_start != rack->r_ctl.rc_last_tlp_seq) {
2181 * The tail is no longer the last one I did a probe on.
2184 rack->r_ctl.rc_tlp_seg_send_cnt = 0;
2185 rack->r_ctl.rc_last_tlp_seq = rsm->r_start;
2188 if (is_tlp_timer == 0) {
2189 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK;
2191 if ((rack->r_ctl.rc_tlp_send_cnt > rack_tlp_max_resend) ||
2192 (rack->r_ctl.rc_tlp_seg_send_cnt > rack_tlp_max_resend)) {
2194 * We have exceeded how many times we can retransmit the
2195 * current TLP; switch to the RTO timer.
2199 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP;
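/*
 * In summary, rack_timer_start() arms exactly one timer: nothing in
 * persist (the caller uses the persist value instead), RXT when the
 * send map is empty but data is outstanding, a RACK timer when the
 * oldest unacked rsm has been SACK-passed, and otherwise a TLP; the
 * TLP is downgraded to RXT when its delay would exceed TCPTV_REXMTMAX
 * or the TLP resend budget is exhausted.
 */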
2208 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2210 if (rack->rc_in_persist == 0) {
2211 if (((tp->t_flags & TF_SENTFIN) == 0) &&
2212 (tp->snd_max - tp->snd_una) >= sbavail(&rack->rc_inp->inp_socket->so_snd))
2213 /* We must have more data to send to enter persist */
2215 rack->r_ctl.rc_went_idle_time = cts;
2216 rack_timer_cancel(tp, rack, cts, __LINE__);
2218 rack->rc_in_persist = 1;
2223 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack)
2225 if (rack->rc_inp->inp_in_hpts) {
2226 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
2227 rack->r_ctl.rc_hpts_flags = 0;
2229 rack->rc_in_persist = 0;
2230 rack->r_ctl.rc_went_idle_time = 0;
2231 tp->t_flags &= ~TF_FORCEDATA;
2236 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, int32_t line,
2237 int32_t slot, uint32_t tot_len_this_send, int32_t frm_out_sbavail)
2240 uint32_t delayed_ack = 0;
2241 uint32_t hpts_timeout;
2246 if (inp->inp_in_hpts) {
2247 /* A previous call is already set up */
2250 if (tp->t_state == TCPS_CLOSED) {
2253 stopped = rack->rc_tmr_stopped;
2254 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
2255 left = rack->r_ctl.rc_timer_exp - cts;
2257 rack->r_ctl.rc_timer_exp = 0;
2258 if (rack->rc_inp->inp_in_hpts == 0) {
2259 rack->r_ctl.rc_hpts_flags = 0;
2262 /* We are hptsi too */
2263 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
2264 } else if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
2266 * We are still left on the hpts; when the t-o goes off
2267 * it will be for output.
2269 if (TSTMP_GT(cts, rack->r_ctl.rc_last_output_to))
2270 slot = cts - rack->r_ctl.rc_last_output_to;
2274 if ((tp->snd_wnd == 0) && TCPS_HAVEESTABLISHED(tp->t_state)) {
2275 /* No send window.. we must enter persist */
2276 rack_enter_persist(tp, rack, cts);
2277 } else if ((frm_out_sbavail &&
2278 (frm_out_sbavail > (tp->snd_max - tp->snd_una)) &&
2279 (tp->snd_wnd < tp->t_maxseg)) &&
2280 TCPS_HAVEESTABLISHED(tp->t_state)) {
2282 * If we have no window or we can't send a segment (and have
2283 * data to send.. we cheat here and frm_out_sbavail is
2284 * passed in with the sbavail(sb) only from bbr_output) and
2285 * we are established, then we must enter persist (if not
2286 * already in persist).
2288 rack_enter_persist(tp, rack, cts);
2290 hpts_timeout = rack_timer_start(tp, rack, cts);
2291 if (tp->t_flags & TF_DELACK) {
2292 delayed_ack = tcp_delacktime;
2293 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK;
2295 if (delayed_ack && ((hpts_timeout == 0) ||
2296 (delayed_ack < hpts_timeout)))
2297 hpts_timeout = delayed_ack;
2299 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
2301 * If no timers are going to run and we will fall off the hptsi
2302 * wheel, we resort to a keep-alive timer if it's configured.
2304 if ((hpts_timeout == 0) &&
2306 if ((tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
2307 (tp->t_state <= TCPS_CLOSING)) {
2309 * Ok we have no timer (persists, rack, tlp, rxt or
2310 * del-ack), we don't have segments being paced. So
2311 * all that is left is the keepalive timer.
2313 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
2314 /* Get the established keep-alive time */
2315 hpts_timeout = TP_KEEPIDLE(tp);
2317 /* Get the initial setup keep-alive time */
2318 hpts_timeout = TP_KEEPINIT(tp);
2320 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP;
2323 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) ==
2324 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) {
2326 * RACK, TLP, persists and RXT timers all are restartable
2327 * based on actions input, i.e. we received a packet (ack
2328 * or sack) and that changes things (rw, or snd_una, etc.).
2329 * Thus we can restart them with a new value. For
2330 * keep-alive, delayed_ack we keep track of what was left
2331 * and restart the timer with a smaller value.
2333 if (left < hpts_timeout)
2334 hpts_timeout = left;
2338 * Hack alert: for now we can't time-out over 2,147,483
2339 * seconds (a bit more than 596 hours), which is probably ok
2342 if (hpts_timeout > 0x7ffffffe)
2343 hpts_timeout = 0x7ffffffe;
2344 rack->r_ctl.rc_timer_exp = cts + hpts_timeout;
2347 rack->r_ctl.rc_last_output_to = cts + slot;
2348 if ((hpts_timeout == 0) || (hpts_timeout > slot)) {
2349 if (rack->rc_inp->inp_in_hpts == 0)
2350 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(slot));
2351 rack_log_to_start(rack, cts, hpts_timeout, slot, 1);
2354 * Arrange for the hpts to kick back in after the
2355 * t-o if the t-o does not cause a send.
2357 if (rack->rc_inp->inp_in_hpts == 0)
2358 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout));
2359 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
2361 } else if (hpts_timeout) {
2362 if (rack->rc_inp->inp_in_hpts == 0)
2363 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout));
2364 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
2366 /* No timer starting */
2368 if (SEQ_GT(tp->snd_max, tp->snd_una)) {
2369 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?",
2370 tp, rack, tot_len_this_send, cts, slot, hpts_timeout);
2374 rack->rc_tmr_stopped = 0;
2376 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, cts);
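/*
 * A small illustration of the slot/timeout interplay, with assumed
 * numbers: if the pacer asked for slot = 10 ms while the computed
 * hpts_timeout = 40 ms, we insert into the hpts for the 10 ms pacing
 * slot and record rc_timer_exp = cts + 40, so the timer logic only
 * acts on a later wheel pass.  With no output pending (slot = 0), the
 * hpts insert is made for hpts_timeout itself.
 */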
2380 * RACK Timer, here we simply do logging and housekeeping.
2381 * The normal rack_output() function will call the
2382 * appropriate thing to check if we need to do a RACK retransmit.
2383 * We return 1, saying don't proceed with rack_output only
2384 * when all timers have been stopped (destroyed PCB?).
2387 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2390 * This timer simply provides an internal trigger to send out data.
2391 * The check_recovery_mode call will see if there are needed
2392 * retransmissions; if so we will enter fast-recovery. The output
2393 * call may or may not do the same thing depending on sysctl settings.
2396 struct rack_sendmap *rsm;
2399 if (tp->t_timers->tt_flags & TT_STOPPED) {
2402 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
2403 /* It's not time yet */
2406 rack_log_to_event(rack, RACK_TO_FRM_RACK);
2407 recovery = IN_RECOVERY(tp->t_flags);
2408 counter_u64_add(rack_to_tot, 1);
2409 if (rack->r_state && (rack->r_state != tp->t_state))
2410 rack_set_state(tp, rack);
2411 rsm = rack_check_recovery_mode(tp, cts);
2415 rtt = rack->rc_rack_rtt;
2418 if ((recovery == 0) &&
2419 (rack->r_ctl.rc_prr_sndcnt < tp->t_maxseg)) {
2421 * The rack-timeout that enters us into recovery
2422 * will force out one MSS and set us up so that we
2423 * can do one more send in 2*rtt (transitioning the
2424 * rack timeout into a rack-tlp).
2426 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
2427 } else if ((rack->r_ctl.rc_prr_sndcnt < tp->t_maxseg) &&
2428 ((rsm->r_end - rsm->r_start) > rack->r_ctl.rc_prr_sndcnt)) {
2430 * When a rack timer goes off, we have to send at
2431 * least one segment. They will be paced a minimum of 1ms
2432 * apart via the next rack timer (or further
2433 * if the rack timer dictates it).
2435 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
2438 /* This is a case that should happen rarely if ever */
2439 counter_u64_add(rack_tlp_does_nada, 1);
2441 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
2443 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
2445 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK;
2450 * TLP Timer, here we simply setup what segment we want to
2451 * have the TLP expire on, the normal rack_output() will then
2454 * We return 1, saying don't proceed with rack_output only
2455 * when all timers have been stopped (destroyed PCB?).
2458 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2463 struct rack_sendmap *rsm = NULL;
2465 uint32_t amm, old_prr_snd = 0;
2466 uint32_t out, avail;
2468 if (tp->t_timers->tt_flags & TT_STOPPED) {
2471 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
2472 /* It's not time yet */
2475 if (rack_progress_timeout_check(tp)) {
2476 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
2480 * A TLP timer has expired. We have been idle for 2 rtts. So we now
2481 * need to figure out how to force a full MSS segment out.
2483 rack_log_to_event(rack, RACK_TO_FRM_TLP);
2484 counter_u64_add(rack_tlp_tot, 1);
2485 if (rack->r_state && (rack->r_state != tp->t_state))
2486 rack_set_state(tp, rack);
2487 so = tp->t_inpcb->inp_socket;
2488 avail = sbavail(&so->so_snd);
2489 out = tp->snd_max - tp->snd_una;
2490 rack->rc_timer_up = 1;
2492 * If we are in recovery we can jazz out a segment if new data is
2493 * present simply by setting rc_prr_sndcnt to a segment.
2495 if ((avail > out) &&
2496 ((rack_always_send_oldest == 0) || (TAILQ_EMPTY(&rack->r_ctl.rc_tmap)))) {
2497 /* New data is available */
2499 if (amm > tp->t_maxseg) {
2501 } else if ((amm < tp->t_maxseg) && ((tp->t_flags & TF_NODELAY) == 0)) {
2502 /* not enough to fill an MTU and no-delay is off */
2505 if (IN_RECOVERY(tp->t_flags)) {
2507 old_prr_snd = rack->r_ctl.rc_prr_sndcnt;
2508 if (out + amm <= tp->snd_wnd)
2509 rack->r_ctl.rc_prr_sndcnt = amm;
2513 /* Set the send-new override */
2514 if (out + amm <= tp->snd_wnd)
2515 rack->r_ctl.rc_tlp_new_data = amm;
2519 rack->r_ctl.rc_tlp_seg_send_cnt = 0;
2520 rack->r_ctl.rc_last_tlp_seq = tp->snd_max;
2521 rack->r_ctl.rc_tlpsend = NULL;
2522 counter_u64_add(rack_tlp_newdata, 1);
2527 * Ok we need to arrange the last un-acked segment to be re-sent, or
2528 * optionally the first un-acked segment.
2530 if (rack_always_send_oldest)
2531 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
2533 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_map, rack_sendmap, r_next);
2534 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) {
2535 rsm = rack_find_high_nonack(rack, rsm);
2539 counter_u64_add(rack_tlp_does_nada, 1);
2541 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
2545 if ((rsm->r_end - rsm->r_start) > tp->t_maxseg) {
2547 * We need to split this, the last segment, in two.
2550 struct rack_sendmap *nrsm;
2552 nrsm = rack_alloc(rack);
2555 * No memory to split, we will just exit and punt
2556 * off to the RXT timer.
2558 counter_u64_add(rack_tlp_does_nada, 1);
2561 nrsm->r_start = (rsm->r_end - tp->t_maxseg);
2562 nrsm->r_end = rsm->r_end;
2563 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
2564 nrsm->r_flags = rsm->r_flags;
2565 nrsm->r_sndcnt = rsm->r_sndcnt;
2566 nrsm->r_rtr_bytes = 0;
2567 rsm->r_end = nrsm->r_start;
2568 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
2569 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
2571 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next);
2572 if (rsm->r_in_tmap) {
2573 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
2574 nrsm->r_in_tmap = 1;
2576 rsm->r_flags &= (~RACK_HAS_FIN);
2579 rack->r_ctl.rc_tlpsend = rsm;
2580 rack->r_ctl.rc_tlp_rtx_out = 1;
2581 if (rsm->r_start == rack->r_ctl.rc_last_tlp_seq) {
2582 rack->r_ctl.rc_tlp_seg_send_cnt++;
2585 rack->r_ctl.rc_last_tlp_seq = rsm->r_start;
2586 rack->r_ctl.rc_tlp_seg_send_cnt = 1;
2589 rack->r_ctl.rc_tlp_send_cnt++;
2590 if (rack->r_ctl.rc_tlp_send_cnt > rack_tlp_max_resend) {
2592 * Can't [re]transmit a segment we have not heard from the
2593 * peer about in max times. We need the retransmit timer to take over.
2597 rack->r_ctl.rc_tlpsend = NULL;
2599 rsm->r_flags &= ~RACK_TLP;
2600 rack->r_ctl.rc_prr_sndcnt = old_prr_snd;
2601 counter_u64_add(rack_tlp_retran_fail, 1);
2604 rsm->r_flags |= RACK_TLP;
2606 if (rsm && (rsm->r_start == rack->r_ctl.rc_last_tlp_seq) &&
2607 (rack->r_ctl.rc_tlp_seg_send_cnt > rack_tlp_max_resend)) {
2609 * We don't want to send a single segment more than the max number of times.
2614 rack->r_timer_override = 1;
2615 rack->r_tlp_running = 1;
2616 rack->rc_tlp_in_progress = 1;
2617 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
2620 rack->rc_timer_up = 0;
2621 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
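/*
 * Worked example of the tail split above, with assumed numbers: for
 * an unacked rsm covering [1000, 4000) and t_maxseg = 1460, nrsm
 * becomes [2540, 4000) and the original shrinks to [1000, 2540); the
 * TLP then (re)sends only that last full segment, [2540, 4000).
 */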
2626 * Delayed ack Timer, here we simply need to setup the
2627 * ACK_NOW flag and remove the DELACK flag. From there
2628 * the output routine will send the ack out.
2630 * We only return 1, saying don't proceed, if all timers
2631 * are stopped (destroyed PCB?).
2634 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2636 if (tp->t_timers->tt_flags & TT_STOPPED) {
2639 rack_log_to_event(rack, RACK_TO_FRM_DELACK);
2640 tp->t_flags &= ~TF_DELACK;
2641 tp->t_flags |= TF_ACKNOW;
2642 TCPSTAT_INC(tcps_delack);
2643 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
2648 * Persists timer, here we simply need to setup the
2649 * FORCE-DATA flag; the output routine will then send
2650 * the one-byte probe.
2652 * We only return 1, saying don't proceed, if all timers
2653 * are stopped (destroyed PCB?).
2656 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2663 if (tp->t_timers->tt_flags & TT_STOPPED) {
2666 if (rack->rc_in_persist == 0)
2668 if (rack_progress_timeout_check(tp)) {
2669 tcp_set_inp_to_drop(inp, ETIMEDOUT);
2672 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
2674 * Persistence timer into zero window. Force a byte to be output, if possible.
2677 TCPSTAT_INC(tcps_persisttimeo);
2679 * Hack: if the peer is dead/unreachable, we do not time out if the
2680 * window is closed. After a full backoff, drop the connection if
2681 * the idle time (no responses to probes) reaches the maximum
2682 * backoff that we would use if retransmitting.
2684 if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
2685 (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
2686 ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
2687 TCPSTAT_INC(tcps_persistdrop);
2689 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
2692 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) &&
2693 tp->snd_una == tp->snd_max)
2694 rack_exit_persist(tp, rack);
2695 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT;
2697 * If the user has closed the socket then drop a persisting
2698 * connection after a much reduced timeout.
2700 if (tp->t_state > TCPS_CLOSE_WAIT &&
2701 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
2703 TCPSTAT_INC(tcps_persistdrop);
2704 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
2707 tp->t_flags |= TF_FORCEDATA;
2709 rack_log_to_event(rack, RACK_TO_FRM_PERSIST);
2714 * If a keepalive goes off, we had no other timers
2715 * happening. We always return 1 here since this
2716 * routine either drops the connection or sends
2717 * out a segment that forces a response.
2720 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2722 struct tcptemp *t_template;
2725 if (tp->t_timers->tt_flags & TT_STOPPED) {
2728 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
2730 rack_log_to_event(rack, RACK_TO_FRM_KEEP);
2732 * Keep-alive timer went off; send something or drop connection if
2733 * idle for too long.
2735 TCPSTAT_INC(tcps_keeptimeo);
2736 if (tp->t_state < TCPS_ESTABLISHED)
2738 if ((tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
2739 tp->t_state <= TCPS_CLOSING) {
2740 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
2743 * Send a packet designed to force a response if the peer is
2744 * up and reachable: either an ACK if the connection is
2745 * still alive, or an RST if the peer has closed the
2746 * connection due to timeout or reboot. Using sequence
2747 * number tp->snd_una-1 causes the transmitted zero-length
2748 * segment to lie outside the receive window; by the
2749 * protocol spec, this requires the correspondent TCP to respond.
2752 TCPSTAT_INC(tcps_keepprobe);
2753 t_template = tcpip_maketemplate(inp);
2755 tcp_respond(tp, t_template->tt_ipgen,
2756 &t_template->tt_t, (struct mbuf *)NULL,
2757 tp->rcv_nxt, tp->snd_una - 1, 0);
2758 free(t_template, M_TEMP);
2761 rack_start_hpts_timer(rack, tp, cts, __LINE__, 0, 0, 0);
2764 TCPSTAT_INC(tcps_keepdrops);
2765 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
2770 * Retransmit helper function, clears up all the ack
2771 * flags and takes care of important bookkeeping.
2774 rack_remxt_tmr(struct tcpcb *tp)
2777 * The retransmit timer went off; all sack'd blocks must be un-acked.
2780 struct rack_sendmap *rsm, *trsm = NULL;
2781 struct tcp_rack *rack;
2784 rack = (struct tcp_rack *)tp->t_fb_ptr;
2785 rack_timer_cancel(tp, rack, tcp_ts_getticks(), __LINE__);
2786 rack_log_to_event(rack, RACK_TO_FRM_TMR);
2787 if (rack->r_state && (rack->r_state != tp->t_state))
2788 rack_set_state(tp, rack);
2790 * Ideally we would like to be able to
2791 * mark SACK-PASS on anything not acked here.
2792 * However, if we do that we would burst out
2793 * all that data 1ms apart. This would be unwise,
2794 * so for now we will just let the normal rxt timer
2795 * and tlp timer take care of it.
2797 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_map, r_next) {
2798 if (rsm->r_flags & RACK_ACKED) {
2801 if (rsm->r_in_tmap == 0) {
2802 /* We must re-add it back to the tlist */
2804 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
2806 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
2812 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS);
2814 /* Clear the count (we just un-acked them) */
2815 rack->r_ctl.rc_sacked = 0;
2816 /* Clear the tlp rtx mark */
2817 rack->r_ctl.rc_tlp_rtx_out = 0;
2818 rack->r_ctl.rc_tlp_seg_send_cnt = 0;
2819 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_map);
2820 /* Setup so we send one segment */
2821 if (rack->r_ctl.rc_prr_sndcnt < tp->t_maxseg)
2822 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
2823 rack->r_timer_override = 1;
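/*
 * The net effect: after an RTO every block is treated as un-acked
 * again, rc_tmap is rebuilt in sequence order, and rc_resend points
 * at the lowest outstanding sequence, so rack_output() retransmits
 * from the front with one segment's worth of PRR credit.
 */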
2827 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
2828 * we will set up to retransmit the lowest seq number outstanding.
2831 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2838 if (tp->t_timers->tt_flags & TT_STOPPED) {
2841 if (rack_progress_timeout_check(tp)) {
2842 tcp_set_inp_to_drop(inp, ETIMEDOUT);
2845 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT;
2846 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
2847 (tp->snd_una == tp->snd_max)) {
2848 /* Nothing outstanding .. nothing to do */
2852 * Retransmission timer went off. Message has not been acked within
2853 * retransmit interval. Back off to a longer retransmit interval
2854 * and retransmit one segment.
2856 if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
2857 tp->t_rxtshift = TCP_MAXRXTSHIFT;
2858 TCPSTAT_INC(tcps_timeoutdrop);
2860 tcp_set_inp_to_drop(rack->rc_inp,
2861 (tp->t_softerror ? (uint16_t) tp->t_softerror : ETIMEDOUT));
2865 if (tp->t_state == TCPS_SYN_SENT) {
2867 * If the SYN was retransmitted, indicate CWND to be limited
2868 * to 1 segment in cc_conn_init().
2871 } else if (tp->t_rxtshift == 1) {
2873 * first retransmit; record ssthresh and cwnd so they can be
2874 * recovered if this turns out to be a "bad" retransmit. A
2875 * retransmit is considered "bad" if an ACK for this segment
2876 * is received within RTT/2 interval; the assumption here is
2877 * that the ACK was already in flight. See "On Estimating
2878 * End-to-End Network Path Properties" by Allman and Paxson
2881 tp->snd_cwnd_prev = tp->snd_cwnd;
2882 tp->snd_ssthresh_prev = tp->snd_ssthresh;
2883 tp->snd_recover_prev = tp->snd_recover;
2884 if (IN_FASTRECOVERY(tp->t_flags))
2885 tp->t_flags |= TF_WASFRECOVERY;
2887 tp->t_flags &= ~TF_WASFRECOVERY;
2888 if (IN_CONGRECOVERY(tp->t_flags))
2889 tp->t_flags |= TF_WASCRECOVERY;
2891 tp->t_flags &= ~TF_WASCRECOVERY;
2892 tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
2893 tp->t_flags |= TF_PREVVALID;
2895 tp->t_flags &= ~TF_PREVVALID;
2896 TCPSTAT_INC(tcps_rexmttimeo);
2897 if ((tp->t_state == TCPS_SYN_SENT) ||
2898 (tp->t_state == TCPS_SYN_RECEIVED))
2899 rexmt = MSEC_2_TICKS(RACK_INITIAL_RTO * tcp_syn_backoff[tp->t_rxtshift]);
2901 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
2902 TCPT_RANGESET(tp->t_rxtcur, rexmt,
2903 max(MSEC_2_TICKS(rack_rto_min), rexmt),
2904 MSEC_2_TICKS(rack_rto_max));
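	/*
	 * Backoff illustration with assumed values (shown in ms; the
	 * code works in ticks): with TCP_REXMTVAL(tp) at 200 ms and
	 * t_rxtshift = 3, rexmt = 200 * tcp_backoff[3] = 1600 ms, and
	 * t_rxtcur is then held within [rack_rto_min, rack_rto_max].
	 */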
2906 * We enter the path for PLMTUD if connection is established or, if
2907 * connection is in FIN_WAIT_1 status; the reason for the latter is that if the
2908 * amount of data we send is very small, we could send it in a couple
2909 * of packets and proceed straight to FIN. In that case we won't
2910 * catch ESTABLISHED state.
2912 if (V_tcp_pmtud_blackhole_detect && (((tp->t_state == TCPS_ESTABLISHED))
2913 || (tp->t_state == TCPS_FIN_WAIT_1))) {
2919 * The idea here is that each stage of the mtu probe (usually
2920 * 1448 -> 1188 -> 524) should be given 2 chances to recover
2921 * before further clamping down. 'tp->t_rxtshift % 2 == 0'
2922 * should take care of that.
2924 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) ==
2925 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) &&
2926 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
2927 tp->t_rxtshift % 2 == 0)) {
2929 * Enter Path MTU Black-hole Detection mechanism:
2930 * - Disable Path MTU Discovery (IP "DF" bit).
2931 * - Reduce MTU to a lower value than what we negotiated with the peer.
2934 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
2935 /* Record that we may have found a black hole. */
2936 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
2937 /* Keep track of previous MSS. */
2938 tp->t_pmtud_saved_maxseg = tp->t_maxseg;
2942 * Reduce the MSS to blackhole value or to the
2943 * default in an attempt to retransmit.
2946 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? 1 : 0;
2948 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
2949 /* Use the sysctl tuneable blackhole MSS. */
2950 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
2951 TCPSTAT_INC(tcps_pmtud_blackhole_activated);
2952 } else if (isipv6) {
2953 /* Use the default MSS. */
2954 tp->t_maxseg = V_tcp_v6mssdflt;
2956 * Disable Path MTU Discovery when we switch to minmss.
2959 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
2960 TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
2963 #if defined(INET6) && defined(INET)
2967 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
2968 /* Use the sysctl tuneable blackhole MSS. */
2969 tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
2970 TCPSTAT_INC(tcps_pmtud_blackhole_activated);
2972 /* Use the default MSS. */
2973 tp->t_maxseg = V_tcp_mssdflt;
2975 * Disable Path MTU Discovery when we switch to minmss.
2978 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
2979 TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
2984 * If further retransmissions are still unsuccessful
2985 * with a lowered MTU, maybe this isn't a blackhole
2986 * and we restore the previous MSS and blackhole
2987 * detection flags. The limit '6' is determined by
2988 * giving each probe stage (1448, 1188, 524) 2
2989 * chances to recover.
2991 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
2992 (tp->t_rxtshift >= 6)) {
2993 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
2994 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
2995 tp->t_maxseg = tp->t_pmtud_saved_maxseg;
2996 TCPSTAT_INC(tcps_pmtud_blackhole_failed);
3001 * Disable RFC1323 and SACK if we haven't got any response to our
3002 * third SYN to work around some broken terminal servers (most of
3003 * which have hopefully been retired) that have bad VJ header
3004 * compression code which trashes TCP segments containing
3005 * unknown-to-them TCP options.
3007 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
3008 (tp->t_rxtshift == 3))
3009 tp->t_flags &= ~(TF_REQ_SCALE | TF_REQ_TSTMP | TF_SACK_PERMIT);
3011 * If we backed off this far, our srtt estimate is probably bogus.
3012 * Clobber it so we'll take the next rtt measurement as our srtt;
3013 * move the current srtt into rttvar to keep the current retransmit times until then.
3016 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
3018 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
3019 in6_losing(tp->t_inpcb);
3022 in_losing(tp->t_inpcb);
3023 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
3026 if (rack_use_sack_filter)
3027 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
3028 tp->snd_recover = tp->snd_max;
3029 tp->t_flags |= TF_ACKNOW;
3031 rack_cong_signal(tp, NULL, CC_RTO);
3037 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling)
3040 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK);
3045 if (tp->t_state == TCPS_LISTEN) {
3046 /* no timers on listen sockets */
3047 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)
3051 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
3054 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
3056 rack_log_to_processing(rack, cts, ret, 0);
3059 if (hpts_calling == 0) {
3061 rack_log_to_processing(rack, cts, ret, 0);
3065 * Ok our timer went off early and we are not paced; false
3066 * alarm, go back to sleep.
3069 left = rack->r_ctl.rc_timer_exp - cts;
3070 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left));
3071 rack_log_to_processing(rack, cts, ret, left);
3072 rack->rc_last_pto_set = 0;
3075 rack->rc_tmr_stopped = 0;
3076 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK;
3077 if (timers & PACE_TMR_DELACK) {
3078 ret = rack_timeout_delack(tp, rack, cts);
3079 } else if (timers & PACE_TMR_RACK) {
3080 ret = rack_timeout_rack(tp, rack, cts);
3081 } else if (timers & PACE_TMR_TLP) {
3082 ret = rack_timeout_tlp(tp, rack, cts);
3083 } else if (timers & PACE_TMR_RXT) {
3084 ret = rack_timeout_rxt(tp, rack, cts);
3085 } else if (timers & PACE_TMR_PERSIT) {
3086 ret = rack_timeout_persist(tp, rack, cts);
3087 } else if (timers & PACE_TMR_KEEP) {
3088 ret = rack_timeout_keepalive(tp, rack, cts);
3090 rack_log_to_processing(rack, cts, ret, timers);
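/*
 * Note the dispatch order above: even if several PACE_TMR_* bits
 * could be pending, exactly one handler runs per expiry, checked in
 * the order delayed-ack, RACK, TLP, RXT, persist, keepalive.
 */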
3095 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line)
3097 uint8_t hpts_removed = 0;
3099 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
3100 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) {
3101 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
3104 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
3105 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
3106 if (rack->rc_inp->inp_in_hpts &&
3107 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) {
3109 * Canceling timers when we have no output being
3110 * paced. We also must remove ourselves from the hpts.
3113 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
3116 rack_log_to_cancel(rack, hpts_removed, line);
3117 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK);
3122 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type)
3128 rack_stopall(struct tcpcb *tp)
3130 struct tcp_rack *rack;
3131 rack = (struct tcp_rack *)tp->t_fb_ptr;
3132 rack->t_timers_stopped = 1;
3137 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta)
3143 rack_timer_active(struct tcpcb *tp, uint32_t timer_type)
3149 rack_stop_all_timers(struct tcpcb *tp)
3151 struct tcp_rack *rack;
3154 * Assure no timers are running.
3156 if (tcp_timer_active(tp, TT_PERSIST)) {
3157 /* We enter in persists, set the flag appropriately */
3158 rack = (struct tcp_rack *)tp->t_fb_ptr;
3159 rack->rc_in_persist = 1;
3161 tcp_timer_suspend(tp, TT_PERSIST);
3162 tcp_timer_suspend(tp, TT_REXMT);
3163 tcp_timer_suspend(tp, TT_KEEP);
3164 tcp_timer_suspend(tp, TT_DELACK);
3168 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
3169 struct rack_sendmap *rsm, uint32_t ts)
3175 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
3176 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
3177 rsm->r_flags |= RACK_OVERMAX;
3179 if ((rsm->r_rtr_cnt > 1) && (rack->r_tlp_running == 0)) {
3180 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
3181 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
3183 idx = rsm->r_rtr_cnt - 1;
3184 rsm->r_tim_lastsent[idx] = ts;
3185 if (rsm->r_flags & RACK_ACKED) {
3186 /* Probably MTU discovery messing with us */
3187 rsm->r_flags &= ~RACK_ACKED;
3188 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
3190 if (rsm->r_in_tmap) {
3191 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
3193 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
3195 if (rsm->r_flags & RACK_SACK_PASSED) {
3196 /* We have retransmitted due to the SACK pass */
3197 rsm->r_flags &= ~RACK_SACK_PASSED;
3198 rsm->r_flags |= RACK_WAS_SACKPASS;
3200 /* Update memory for next rtr */
3201 rack->r_ctl.rc_next = TAILQ_NEXT(rsm, r_next);
3206 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
3207 struct rack_sendmap *rsm, uint32_t ts, int32_t * lenp)
3210 * We (re-)transmitted starting at rsm->r_start for some length
3211 * (possibly less than r_end).
3213 struct rack_sendmap *nrsm;
3219 c_end = rsm->r_start + len;
3220 if (SEQ_GEQ(c_end, rsm->r_end)) {
3222 * We retransmitted the whole piece, or more than the whole,
3223 * slopping into the next rsm.
3225 rack_update_rsm(tp, rack, rsm, ts);
3226 if (c_end == rsm->r_end) {
3232 /* Hangs over the end; return what's left */
3233 act_len = rsm->r_end - rsm->r_start;
3234 *lenp = (len - act_len);
3235 return (rsm->r_end);
3237 /* We don't get out of this block. */
3240 * Here we retransmitted less than the whole thing which means we
3241 * have to split this into what was transmitted and what was not.
3243 nrsm = rack_alloc(rack);
3246 * We can't get memory, so let's not proceed.
3252 * So here we are going to take the original rsm and make it what we
3253 * retransmitted. nrsm will be the tail portion we did not
3254 * retransmit. For example say the chunk was 1, 11 (10 bytes). And
3255 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to
3256 * 1, 6 and the new piece will be 6, 11.
3258 nrsm->r_start = c_end;
3259 nrsm->r_end = rsm->r_end;
3260 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
3261 nrsm->r_flags = rsm->r_flags;
3262 nrsm->r_sndcnt = rsm->r_sndcnt;
3263 nrsm->r_rtr_bytes = 0;
3265 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
3266 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
3268 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next);
3269 if (rsm->r_in_tmap) {
3270 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
3271 nrsm->r_in_tmap = 1;
3273 rsm->r_flags &= (~RACK_HAS_FIN);
3274 rack_update_rsm(tp, rack, rsm, ts);
3281 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
3282 uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts,
3283 uint8_t pass, struct rack_sendmap *hintrsm)
3285 struct tcp_rack *rack;
3286 struct rack_sendmap *rsm, *nrsm;
3287 register uint32_t snd_max, snd_una;
3291 * Add to the RACK log of packets in flight or retransmitted. If
3292 * there is a TS option we will use the TS echoed, if not we will
3295 * Retransmissions will increment the count and move the ts to its
3296 * proper place. Note that if options do not include TS's then we
3297 * won't be able to effectively use the ACK for an RTT on a retran.
3299 * Notes about r_start and r_end. Lets consider a send starting at
3300 * sequence 1 for 10 bytes. In such an example the r_start would be
3301 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
3302 * This means that r_end is actually the first sequence for the next send.
3307 * If err is set what do we do XXXrrs? should we not add the thing?
3308 * -- i.e. return if err != 0 or should we pretend we sent it? --
3309 * i.e. proceed with add ** do this for now.
3311 INP_WLOCK_ASSERT(tp->t_inpcb);
3314 * We don't log errors -- we could but snd_max does not
3315 * advance in this case either.
3319 if (th_flags & TH_RST) {
3321 * We don't log resets and we return immediately from
3326 rack = (struct tcp_rack *)tp->t_fb_ptr;
3327 snd_una = tp->snd_una;
3328 if (SEQ_LEQ((seq_out + len), snd_una)) {
3329 /* Are we sending an old segment to induce an ack (keep-alive)? */
3332 if (SEQ_LT(seq_out, snd_una)) {
3333 /* huh? should we panic? */
3336 end = seq_out + len;
3338 len = end - seq_out;
3340 snd_max = tp->snd_max;
3341 if (th_flags & (TH_SYN | TH_FIN)) {
3343 * The call to rack_log_output is made before bumping
3344 * snd_max. This means we can record one extra byte on a SYN
3345 * or FIN if seq_out is adding more on and a FIN is present
3346 * (and we are not resending).
3348 if (th_flags & TH_SYN)
3350 if (th_flags & TH_FIN)
3352 if (SEQ_LT(snd_max, tp->snd_nxt)) {
3354 * The add/update has not been done for the FIN/SYN yet.
3357 snd_max = tp->snd_nxt;
3361 /* We don't log zero window probes */
3364 rack->r_ctl.rc_time_last_sent = ts;
3365 if (IN_RECOVERY(tp->t_flags)) {
3366 rack->r_ctl.rc_prr_out += len;
3368 /* First question: is it a retransmission? */
3369 if (seq_out == snd_max) {
3371 rsm = rack_alloc(rack);
3374 * Hmm out of memory and the tcb got destroyed while we tried to wait.
3378 panic("Out of memory when we should not be rack:%p", rack);
3382 if (th_flags & TH_FIN) {
3383 rsm->r_flags = RACK_HAS_FIN;
3387 rsm->r_tim_lastsent[0] = ts;
3389 rsm->r_rtr_bytes = 0;
3390 if (th_flags & TH_SYN) {
3391 /* The data space is one beyond snd_una */
3392 rsm->r_start = seq_out + 1;
3393 rsm->r_end = rsm->r_start + (len - 1);
3396 rsm->r_start = seq_out;
3397 rsm->r_end = rsm->r_start + len;
3400 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_map, rsm, r_next);
3401 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
3406 * If we reach here it's a retransmission and we need to find it.
3409 if (hintrsm && (hintrsm->r_start == seq_out)) {
3412 } else if (rack->r_ctl.rc_next) {
3413 /* We have a hint from a previous run */
3414 rsm = rack->r_ctl.rc_next;
3416 /* No hints sorry */
3419 if ((rsm) && (rsm->r_start == seq_out)) {
3421 * We used rc_next or hintrsm to retransmit; hopefully the likely case.
3424 seq_out = rack_update_entry(tp, rack, rsm, ts, &len);
3431 /* Ok it was not the last pointer; go through it the hard way. */
3432 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_map, r_next) {
3433 if (rsm->r_start == seq_out) {
3434 seq_out = rack_update_entry(tp, rack, rsm, ts, &len);
3435 rack->r_ctl.rc_next = TAILQ_NEXT(rsm, r_next);
3442 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) {
3443 /* Transmitted within this piece */
3445 * Ok we must split off the front and then let the
3446 * update do the rest
3448 nrsm = rack_alloc(rack);
3451 panic("Ran out of memory that was preallocated? rack:%p", rack);
3453 rack_update_rsm(tp, rack, rsm, ts);
3457 * copy rsm to nrsm and then trim the front of rsm
3458 * to not include this part.
3460 nrsm->r_start = seq_out;
3461 nrsm->r_end = rsm->r_end;
3462 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
3463 nrsm->r_flags = rsm->r_flags;
3464 nrsm->r_sndcnt = rsm->r_sndcnt;
3465 nrsm->r_rtr_bytes = 0;
3466 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
3467 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
3469 rsm->r_end = nrsm->r_start;
3470 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next);
3471 if (rsm->r_in_tmap) {
3472 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
3473 nrsm->r_in_tmap = 1;
3475 rsm->r_flags &= (~RACK_HAS_FIN);
3476 seq_out = rack_update_entry(tp, rack, nrsm, ts, &len);
3483 * Hmm, not found in map; did they retransmit both old and on into the new?
3486 if (seq_out == tp->snd_max) {
3488 } else if (SEQ_LT(seq_out, tp->snd_max)) {
3490 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n",
3491 seq_out, len, tp->snd_una, tp->snd_max);
3492 printf("Starting Dump of all rack entries\n");
3493 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_map, r_next) {
3494 printf("rsm:%p start:%u end:%u\n",
3495 rsm, rsm->r_start, rsm->r_end);
3497 printf("Dump complete\n");
3498 panic("seq_out not found rack:%p tp:%p",
3504 * Hmm beyond sndmax? (only if we are using the new rtt-pack
3507 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p",
3508 seq_out, len, tp->snd_max, tp);
3514 * Record one of the RTT updates from an ack into
3515 * our sample structure.
3518 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt)
3520 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
3521 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
3522 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
3524 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
3525 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
3526 rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
3528 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
3529 rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
3530 rack->r_ctl.rack_rs.rs_rtt_cnt++;
3534 * Collect new round-trip time estimate
3535 * and update averages and current timeout.
3538 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp)
3541 uint32_t o_srtt, o_var;
3544 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY)
3545 /* No valid sample */
3547 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) {
3548 /* We are to use the lowest RTT seen in a single ack */
3549 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
3550 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) {
3551 /* We are to use the highest RTT seen in a single ack */
3552 rtt = rack->r_ctl.rack_rs.rs_rtt_highest;
3553 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) {
3554 /* We are to use the average RTT seen in a single ack */
3555 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot /
3556 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt);
3559 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method);
3565 rack_log_rtt_sample(rack, rtt);
3566 o_srtt = tp->t_srtt;
3567 o_var = tp->t_rttvar;
3568 rack = (struct tcp_rack *)tp->t_fb_ptr;
3569 if (tp->t_srtt != 0) {
3571 * srtt is stored as fixed point with 5 bits after the
3572 * binary point (i.e., scaled by 8). The following magic is
3573 * equivalent to the smoothing algorithm in rfc793 with an
3574 * alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed point).
3575 * Adjust rtt to origin 0.
3577 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
3578 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
3580 tp->t_srtt += delta;
3581 if (tp->t_srtt <= 0)
3585 * We accumulate a smoothed rtt variance (actually, a
3586 * smoothed mean difference), then set the retransmit timer
3587 * to smoothed rtt + 4 times the smoothed variance. rttvar
3588 * is stored as fixed point with 4 bits after the binary
3589 * point (scaled by 16). The following is equivalent to
3590 * rfc793 smoothing with an alpha of .75 (rttvar =
3591 * rttvar*3/4 + |delta| / 4). This replaces rfc793's
3596 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
3597 tp->t_rttvar += delta;
3598 if (tp->t_rttvar <= 0)
3600 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
3601 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3604 * No rtt measurement yet - use the unsmoothed rtt. Set the
3605 * variance to half the rtt (so our first retransmit happens at 3 * rtt).
3608 tp->t_srtt = rtt << TCP_RTT_SHIFT;
3609 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
3610 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3612 TCPSTAT_INC(tcps_rttupdated);
3613 rack_log_rtt_upd(tp, rack, rtt, o_srtt, o_var);
3615 #ifdef NETFLIX_STATS
3616 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt));
3621 * the retransmit should happen at rtt + 4 * rttvar. Because of the
3622 * way we do the smoothing, srtt and rttvar will each average +1/2
3623 * tick of bias. When we compute the retransmit timer, we want 1/2
3624 * tick of rounding and 1 extra tick because of +-1/2 tick
3625 * uncertainty in the firing of the timer. The bias will give us
3626 * exactly the 1.5 tick we need. But, because the bias is
3627 * statistical, we have to test that we don't drop below the minimum
3628 * feasible timer (which is 2 ticks).
3630 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
3631 max(MSEC_2_TICKS(rack_rto_min), rtt + 2), MSEC_2_TICKS(rack_rto_max));
3632 tp->t_softerror = 0;
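/*
 * First-sample illustration (assumed rtt of 10 ticks): with no prior
 * srtt the code seeds t_srtt to the rtt and t_rttvar to half of it,
 * so the initial timeout works out to roughly
 * srtt + 4 * rttvar = 10 + 4 * 5 = 30 ticks, the 3 * rtt first
 * retransmit described above, before the rack_rto_min/rack_rto_max
 * clamp is applied.
 */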
3636 rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm,
3637 uint32_t t, uint32_t cts)
3640 * For this RSM, we acknowledged the data from a previous
3641 * transmission, not the last one we made. This means we did a false retransmit.
3644 struct tcp_rack *rack;
3646 if (rsm->r_flags & RACK_HAS_FIN) {
3648 * The FIN is often sent multiple times when we
3649 * have everything outstanding ack'd. We ignore this case
3650 * since it's over now.
3654 if (rsm->r_flags & RACK_TLP) {
3656 * We expect TLP's to have this occur.
3660 rack = (struct tcp_rack *)tp->t_fb_ptr;
3661 /* should we undo cc changes and exit recovery? */
3662 if (IN_RECOVERY(tp->t_flags)) {
3663 if (rack->r_ctl.rc_rsm_start == rsm->r_start) {
3665 * Undo what we ratcheted down and exit recovery if possible.
3668 EXIT_RECOVERY(tp->t_flags);
3669 tp->snd_recover = tp->snd_una;
3670 if (rack->r_ctl.rc_cwnd_at > tp->snd_cwnd)
3671 tp->snd_cwnd = rack->r_ctl.rc_cwnd_at;
3672 if (rack->r_ctl.rc_ssthresh_at > tp->snd_ssthresh)
3673 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at;
3676 if (rsm->r_flags & RACK_WAS_SACKPASS) {
3678 * We retransmitted based on a sack and the earlier
3679 * retransmission ack'd it - re-ordering is occurring.
3681 counter_u64_add(rack_reorder_seen, 1);
3682 rack->r_ctl.rc_reorder_ts = cts;
3684 counter_u64_add(rack_badfr, 1);
3685 counter_u64_add(rack_badfr_bytes, (rsm->r_end - rsm->r_start));
3690 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
3691 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type)
3696 if (rsm->r_flags & RACK_ACKED)
3701 if ((rsm->r_rtr_cnt == 1) ||
3702 ((ack_type == CUM_ACKED) &&
3703 (to->to_flags & TOF_TS) &&
3705 (rsm->r_tim_lastsent[rsm->r_rtr_cnt - 1] == to->to_tsecr))
3708 * We will only find a matching timestamp if it's cum-acked.
3709 * But if it's only one retransmission it's a for-sure match
3712 t = cts - rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
3715 if (!tp->t_rttlow || tp->t_rttlow > t)
3717 if (!rack->r_ctl.rc_rack_min_rtt ||
3718 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
3719 rack->r_ctl.rc_rack_min_rtt = t;
3720 if (rack->r_ctl.rc_rack_min_rtt == 0) {
3721 rack->r_ctl.rc_rack_min_rtt = 1;
3724 tcp_rack_xmit_timer(rack, TCP_TS_TO_TICKS(t) + 1);
3725 if ((rsm->r_flags & RACK_TLP) &&
3726 (!IN_RECOVERY(tp->t_flags))) {
3727 /* Segment was a TLP and our retrans matched */
3728 if (rack->r_ctl.rc_tlp_cwnd_reduce) {
3729 rack->r_ctl.rc_rsm_start = tp->snd_max;
3730 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
3731 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
3732 rack_cong_signal(tp, NULL, CC_NDUPACK);
3734 * When we enter recovery we need to assure
3735 * we send one packet.
3737 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
3739 rack->r_ctl.rc_tlp_rtx_out = 0;
3741 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
3742 /* New more recent rack_tmit_time */
3743 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
3744 rack->rc_rack_rtt = t;
3749 * We clear the soft/rxtshift since we got an ack.
3750 * There is no assurance we will call the commit() function
3751 * so we need to clear these to avoid incorrect handling.
3754 tp->t_softerror = 0;
3755 if ((to->to_flags & TOF_TS) &&
3756 (ack_type == CUM_ACKED) &&
3758 ((rsm->r_flags & (RACK_DEFERRED | RACK_OVERMAX)) == 0)) {
3760 * Now which timestamp does it match? In this block the ACK
3761 * must be coming from a previous transmission.
3763 for (i = 0; i < rsm->r_rtr_cnt; i++) {
3764 if (rsm->r_tim_lastsent[i] == to->to_tsecr) {
3765 t = cts - rsm->r_tim_lastsent[i];
3768 if ((i + 1) < rsm->r_rtr_cnt) {
3770 rack_earlier_retran(tp, rsm, t, cts);
3772 if (!tp->t_rttlow || tp->t_rttlow > t)
3774 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
3775 rack->r_ctl.rc_rack_min_rtt = t;
3776 if (rack->r_ctl.rc_rack_min_rtt == 0) {
3777 rack->r_ctl.rc_rack_min_rtt = 1;
3781 * Note the following calls to
3782 * tcp_rack_xmit_timer() are being commented
3783 * out for now. They give us no more accuracy
3784 * and often lead to a wrong choice. We have
3785 * enough samples that have not been
3786 * retransmitted. I leave the commented out
3787 * code in here in case in the future we
3788 * decide to add it back (though I can't foresee
3789 * doing that). That way we will easily see
3790 * where they need to be placed.
3792 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
3793 rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
3794 /* New more recent rack_tmit_time */
3795 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
3796 rack->rc_rack_rtt = t;
3804 * Ok, it's a SACK block that we retransmitted, or a Windows
3805 * machine without timestamps. We can tell nothing from the
3806 * time-stamp since it's not there, or from the time the peer last
3807 * received a segment that moved forward its cum-ack point.
3810 i = rsm->r_rtr_cnt - 1;
3811 t = cts - rsm->r_tim_lastsent[i];
3814 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
3816 * We retransmitted and the ack came back in less
3817 * than the smallest rtt we have observed. We most
3818 * likely did an improper retransmit as outlined in
3819 * 4.2 Step 3 point 2 in the rack-draft.
3821 i = rsm->r_rtr_cnt - 2;
3822 t = cts - rsm->r_tim_lastsent[i];
3823 rack_earlier_retran(tp, rsm, t, cts);
3824 } else if (rack->r_ctl.rc_rack_min_rtt) {
3826 * We retransmitted it and the retransmit did the job.
3829 if (!rack->r_ctl.rc_rack_min_rtt ||
3830 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
3831 rack->r_ctl.rc_rack_min_rtt = t;
3832 if (rack->r_ctl.rc_rack_min_rtt == 0) {
3833 rack->r_ctl.rc_rack_min_rtt = 1;
3836 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[i])) {
3837 /* New more recent rack_tmit_time */
3838 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[i];
3839 rack->rc_rack_rtt = t;
3848 * Mark the SACK_PASSED flag on all entries prior to rsm, send-wise.
3851 rack_log_sack_passed(struct tcpcb *tp,
3852 struct tcp_rack *rack, struct rack_sendmap *rsm)
3854 struct rack_sendmap *nrsm;
3858 idx = rsm->r_rtr_cnt - 1;
3859 ts = rsm->r_tim_lastsent[idx];
3861 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap,
3862 rack_head, r_tnext) {
3864 /* Skip the original segment; it is acked */
3867 if (nrsm->r_flags & RACK_ACKED) {
3868 /* Skip ack'd segments */
3871 idx = nrsm->r_rtr_cnt - 1;
3872 if (ts == nrsm->r_tim_lastsent[idx]) {
3874 * For this case let's use the seq no; if we sent in a
3875 * big block (TSO) we would have a bunch of segments
3876 * sent at the same time.
3878 * We would only get a report if its SEQ is earlier.
3879 * If we have done multiple retransmits the times
3880 * would not be equal.
3882 if (SEQ_LT(nrsm->r_start, rsm->r_start)) {
3883 nrsm->r_flags |= RACK_SACK_PASSED;
3884 nrsm->r_flags &= ~RACK_WAS_SACKPASS;
3888 * Here they were sent at different times, not a big
3889 * block. Since we transmitted this one later and
3890 * see it sack'd then this must also be missing (or
3891 * we would have gotten a sack block for it)
3893 nrsm->r_flags |= RACK_SACK_PASSED;
3894 nrsm->r_flags &= ~RACK_WAS_SACKPASS;
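/*
 * Illustrative sketch (hypothetical helper, not in the stack): the
 * ordering rule used above in one place. Segments sent in the same TSO
 * burst share a send time, so only a lower start sequence marks an
 * earlier send; otherwise the send times themselves decide.
 */
#if 0
static int
sent_before(tcp_seq a_start, uint32_t a_ts, tcp_seq b_start, uint32_t b_ts)
{
	if (a_ts == b_ts)
		return (SEQ_LT(a_start, b_start));	/* same TSO burst */
	return (TSTMP_LT(a_ts, b_ts));			/* distinct sends */
}
#endif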
3900 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack,
3901 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts)
3905 uint32_t start, end, changed = 0;
3906 struct rack_sendmap *rsm, *nrsm;
3907 int32_t used_ref = 1;
3909 start = sack->start;
3912 if (rsm && SEQ_LT(start, rsm->r_start)) {
3913 TAILQ_FOREACH_REVERSE_FROM(rsm, &rack->r_ctl.rc_map, rack_head, r_next) {
3914 if (SEQ_GEQ(start, rsm->r_start) &&
3915 SEQ_LT(start, rsm->r_end)) {
3925 /* First lets locate the block where this guy is */
3926 TAILQ_FOREACH_FROM(rsm, &rack->r_ctl.rc_map, r_next) {
3927 if (SEQ_GEQ(start, rsm->r_start) &&
3928 SEQ_LT(start, rsm->r_end)) {
3935 * This happens when we get duplicate sack blocks with the
3936 * same end. For example SACK 4: 100, SACK 3: 100. The sort
3937 * will not change their location, so we would just start at
3938 * the end of the first one and get lost.
3940 if (tp->t_flags & TF_SENTFIN) {
3942 * Check to see if we have not logged the FIN that
3945 nrsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_map, rack_sendmap, r_next);
3946 if (nrsm && (nrsm->r_end + 1) == tp->snd_max) {
3948 * Ok we did not get the FIN logged.
3957 panic("tp:%p rack:%p sack:%p to:%p prsm:%p",
3958 tp, rack, sack, to, prsm);
3964 counter_u64_add(rack_sack_proc_restart, 1);
3965 goto start_at_beginning;
3967 /* Ok we have an ACK for some piece of rsm */
3968 if (rsm->r_start != start) {
3970 * Need to split this in two pieces, the before and after.
3972 nrsm = rack_alloc(rack);
3975 * failed XXXrrs what can we do but lose the sack
3980 nrsm->r_start = start;
3981 nrsm->r_rtr_bytes = 0;
3982 nrsm->r_end = rsm->r_end;
3983 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
3984 nrsm->r_flags = rsm->r_flags;
3985 nrsm->r_sndcnt = rsm->r_sndcnt;
3986 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
3987 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
3989 rsm->r_end = nrsm->r_start;
3990 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next);
3991 if (rsm->r_in_tmap) {
3992 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
3993 nrsm->r_in_tmap = 1;
3995 rsm->r_flags &= (~RACK_HAS_FIN);
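/*
 * Illustrative note: after the split above, rsm and nrsm tile the
 * original range exactly. A hypothetical sanity check would be:
 */
#if 0
	KASSERT(rsm->r_end == nrsm->r_start &&
	    SEQ_LT(rsm->r_start, rsm->r_end) &&
	    SEQ_LT(nrsm->r_start, nrsm->r_end),
	    ("sack split left a gap or an empty piece"));
#endif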
3998 if (SEQ_GEQ(end, rsm->r_end)) {
4000 * The end of this block is either beyond this guy or right
4004 if ((rsm->r_flags & RACK_ACKED) == 0) {
4005 rack_update_rtt(tp, rack, rsm, to, cts, SACKED);
4006 changed += (rsm->r_end - rsm->r_start);
4007 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
4008 rack_log_sack_passed(tp, rack, rsm);
4009 /* Is reordering occurring? */
4010 if (rsm->r_flags & RACK_SACK_PASSED) {
4011 counter_u64_add(rack_reorder_seen, 1);
4012 rack->r_ctl.rc_reorder_ts = cts;
4014 rsm->r_flags |= RACK_ACKED;
4015 rsm->r_flags &= ~RACK_TLP;
4016 if (rsm->r_in_tmap) {
4017 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
4021 if (end == rsm->r_end) {
4022 /* This block only - done */
4025 /* There is more not covered by this rsm, move on */
4027 nrsm = TAILQ_NEXT(rsm, r_next);
4032 /* Ok we need to split off this one at the tail */
4033 nrsm = rack_alloc(rack);
4035 /* failed rrs what can we do but lose the sack info? */
4039 nrsm->r_start = end;
4040 nrsm->r_end = rsm->r_end;
4041 nrsm->r_rtr_bytes = 0;
4042 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
4043 nrsm->r_flags = rsm->r_flags;
4044 nrsm->r_sndcnt = rsm->r_sndcnt;
4045 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
4046 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
4048 /* The sack block does not cover this guy fully */
4049 rsm->r_flags &= (~RACK_HAS_FIN);
4051 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next);
4052 if (rsm->r_in_tmap) {
4053 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
4054 nrsm->r_in_tmap = 1;
4056 if (rsm->r_flags & RACK_ACKED) {
4057 /* Been here done that */
4060 rack_update_rtt(tp, rack, rsm, to, cts, SACKED);
4061 changed += (rsm->r_end - rsm->r_start);
4062 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
4063 rack_log_sack_passed(tp, rack, rsm);
4064 /* Is reordering occurring? */
4065 if (rsm->r_flags & RACK_SACK_PASSED) {
4066 counter_u64_add(rack_reorder_seen, 1);
4067 rack->r_ctl.rc_reorder_ts = cts;
4069 rsm->r_flags |= RACK_ACKED;
4070 rsm->r_flags &= ~RACK_TLP;
4071 if (rsm->r_in_tmap) {
4072 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
4076 if (used_ref == 0) {
4077 counter_u64_add(rack_sack_proc_all, 1);
4079 counter_u64_add(rack_sack_proc_short, 1);
4081 /* Save off where we last were */
4083 rack->r_ctl.rc_sacklast = TAILQ_NEXT(rsm, r_next);
4085 rack->r_ctl.rc_sacklast = NULL;
4091 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack)
4093 struct rack_sendmap *tmap;
4096 while (rsm && (rsm->r_flags & RACK_ACKED)) {
4097 /* It's no longer sacked, mark it so */
4098 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
4100 if (rsm->r_in_tmap) {
4101 panic("rack:%p rsm:%p flags:0x%x in tmap?",
4102 rack, rsm, rsm->r_flags);
4105 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS);
4106 /* Rebuild it into our tmap */
4108 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
4111 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext);
4114 tmap->r_in_tmap = 1;
4115 rsm = TAILQ_NEXT(rsm, r_next);
4118 * Now let's possibly clear the sack filter so we start
4119 * recognizing sacks that cover this area.
4121 if (rack_use_sack_filter)
4122 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack);
4127 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th)
4129 uint32_t changed, last_seq, entered_recovery = 0;
4130 struct tcp_rack *rack;
4131 struct rack_sendmap *rsm;
4132 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1];
4133 register uint32_t th_ack;
4134 int32_t i, j, k, num_sack_blks = 0;
4135 uint32_t cts, acked, ack_point, sack_changed = 0;
4137 INP_WLOCK_ASSERT(tp->t_inpcb);
4138 if (th->th_flags & TH_RST) {
4139 /* We don't log resets */
4142 rack = (struct tcp_rack *)tp->t_fb_ptr;
4143 cts = tcp_ts_getticks();
4144 rsm = TAILQ_FIRST(&rack->r_ctl.rc_map);
4146 th_ack = th->th_ack;
4148 if (SEQ_GT(th_ack, tp->snd_una)) {
4149 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__);
4150 tp->t_acktime = ticks;
4152 if (rsm && SEQ_GT(th_ack, rsm->r_start))
4153 changed = th_ack - rsm->r_start;
4156 * The ACK point is advancing to th_ack, we must drop off
4157 * the packets in the rack log and calculate any eligible
4160 rack->r_wanted_output++;
4162 rsm = TAILQ_FIRST(&rack->r_ctl.rc_map);
4164 if ((th_ack - 1) == tp->iss) {
4166 * For the SYN incoming case we will not
4167 * have called tcp_output for the sending of
4168 * the SYN, so there will be no map. All
4169 * other cases should probably be a panic.
4173 if (tp->t_flags & TF_SENTFIN) {
4174 /* if we send a FIN we will not have a map */
4178 panic("No rack map tp:%p for th:%p state:%d rack:%p snd_una:%u snd_max:%u snd_nxt:%u chg:%d\n",
4180 th, tp->t_state, rack,
4181 tp->snd_una, tp->snd_max, tp->snd_nxt, changed);
4185 if (SEQ_LT(th_ack, rsm->r_start)) {
4186 /* Huh map is missing this */
4188 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n",
4190 th_ack, tp->t_state, rack->r_state);
4194 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED);
4195 /* Now do we consume the whole thing? */
4196 if (SEQ_GEQ(th_ack, rsm->r_end)) {
4197 /* It's all consumed. */
4200 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
4201 rsm->r_rtr_bytes = 0;
4202 TAILQ_REMOVE(&rack->r_ctl.rc_map, rsm, r_next);
4203 if (rsm->r_in_tmap) {
4204 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
4207 if (rack->r_ctl.rc_next == rsm) {
4208 /* scoot along the marker */
4209 rack->r_ctl.rc_next = TAILQ_FIRST(&rack->r_ctl.rc_map);
4211 if (rsm->r_flags & RACK_ACKED) {
4213 * It was acked on the scoreboard -- remove
4216 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
4217 } else if (rsm->r_flags & RACK_SACK_PASSED) {
4219 * There are acked segments ACKED on the
4220 * scoreboard further up. We are seeing
4223 counter_u64_add(rack_reorder_seen, 1);
4224 rsm->r_flags |= RACK_ACKED;
4225 rack->r_ctl.rc_reorder_ts = cts;
4227 left = th_ack - rsm->r_end;
4228 if (rsm->r_rtr_cnt > 1) {
4230 * Technically we should make r_rtr_cnt be
4231 * monotonically increasing and just mod it to
4232 * the timestamp it is replacing... that way
4233 * we would have the last 3 retransmits. Now
4234 * rc_loss_count will be wrong if we
4235 * retransmit something more than 2 times in
4238 rack->r_ctl.rc_loss_count += (rsm->r_rtr_cnt - 1);
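/*
 * Worked example (illustrative): a segment sent once and then
 * retransmitted twice arrives here with r_rtr_cnt == 3, so 2 is
 * added to rc_loss_count above.
 */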
4240 /* Free back to zone */
4241 rack_free(rack, rsm);
4247 if (rsm->r_flags & RACK_ACKED) {
4249 * It was acked on the scoreboard -- remove it from
4250 * total for the part being cum-acked.
4252 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
4254 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
4255 rsm->r_rtr_bytes = 0;
4256 rsm->r_start = th_ack;
4259 /* Check for reneging */
4260 rsm = TAILQ_FIRST(&rack->r_ctl.rc_map);
4261 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) {
4263 * The peer has moved snd_una up to
4264 * the edge of this send, i.e. one
4265 * that it had previously acked. The only
4266 * way that can be true is if the peer threw
4267 * away data (space issues) that it had
4268 * previously sacked (else it would have
4269 * given us snd_una up to rsm->r_end).
4270 * We need to undo the acked markings here.
4272 * Note we have to look to make sure th_ack is
4273 * our rsm->r_start in case we get an old ack
4274 * where th_ack is behind snd_una.
4276 rack_peer_reneges(rack, rsm, th->th_ack);
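/*
 * Worked example (illustrative): suppose bytes 1000-2000 sit first in
 * the map with RACK_ACKED set from an earlier SACK. A cum-ack arriving
 * with th_ack == 1000 means the peer reneged on that SACK, so the
 * entry's ACKED marking is undone and it goes back into the transmit
 * map for loss tracking.
 */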
4278 if ((to->to_flags & TOF_SACK) == 0) {
4279 /* We are done, nothing left to log */
4282 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_map, rack_sendmap, r_next);
4284 last_seq = rsm->r_end;
4286 last_seq = tp->snd_max;
4288 /* Sack block processing */
4289 if (SEQ_GT(th_ack, tp->snd_una))
4292 ack_point = tp->snd_una;
4293 for (i = 0; i < to->to_nsacks; i++) {
4294 bcopy((to->to_sacks + i * TCPOLEN_SACK),
4295 &sack, sizeof(sack));
4296 sack.start = ntohl(sack.start);
4297 sack.end = ntohl(sack.end);
4298 if (SEQ_GT(sack.end, sack.start) &&
4299 SEQ_GT(sack.start, ack_point) &&
4300 SEQ_LT(sack.start, tp->snd_max) &&
4301 SEQ_GT(sack.end, ack_point) &&
4302 SEQ_LEQ(sack.end, tp->snd_max)) {
4303 if ((rack->r_ctl.rc_num_maps_alloced > rack_sack_block_limit) &&
4304 (SEQ_LT(sack.end, last_seq)) &&
4305 ((sack.end - sack.start) < (tp->t_maxseg / 8))) {
4307 * Not the last piece and it's smaller than
4308 * 1/8th of a MSS. We ignore this.
4310 counter_u64_add(rack_runt_sacks, 1);
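/*
 * Worked example (illustrative): with a 1448-byte MSS the runt
 * threshold above is 1448 / 8 = 181 bytes, so a 100-byte SACK
 * block that is not the last piece would be skipped here.
 */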
4313 sack_blocks[num_sack_blks] = sack;
4315 #ifdef NETFLIX_STATS
4316 } else if (SEQ_LEQ(sack.start, th_ack) &&
4317 SEQ_LEQ(sack.end, th_ack)) {
4319 * It's a D-SACK block.
4321 tcp_record_dsack(sack.start, sack.end);
4326 if (num_sack_blks == 0)
4329 * Sort the SACK blocks so we can update the rack scoreboard with
4332 if (rack_use_sack_filter) {
4333 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, num_sack_blks, th->th_ack);
4335 if (num_sack_blks < 2) {
4338 /* Sort the sacks */
4339 for (i = 0; i < num_sack_blks; i++) {
4340 for (j = i + 1; j < num_sack_blks; j++) {
4341 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
4342 sack = sack_blocks[i];
4343 sack_blocks[i] = sack_blocks[j];
4344 sack_blocks[j] = sack;
4349 * Now are any of the sack block ends the same (yes some
4350 * implementations send these)?
4353 if (num_sack_blks > 1) {
4354 for (i = 0; i < num_sack_blks; i++) {
4355 for (j = i + 1; j < num_sack_blks; j++) {
4356 if (sack_blocks[i].end == sack_blocks[j].end) {
4358 * Ok, these two have the same end; we
4359 * want the smallest end, and then
4360 * throw away the larger and start
4363 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) {
4365 * The second block covers
4366 * more area, use that
4368 sack_blocks[i].start = sack_blocks[j].start;
4371 * Now collapse out the dup-sack and
4374 for (k = (j + 1); k < num_sack_blks; k++) {
4375 sack_blocks[j].start = sack_blocks[k].start;
4376 sack_blocks[j].end = sack_blocks[k].end;
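/*
 * Worked example (illustrative): given blocks {40-100} and {10-100},
 * the pass above keeps the wider {10-100} in slot i, shifts every
 * later block down one slot, and rescans with one fewer block.
 */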
4386 rsm = rack->r_ctl.rc_sacklast;
4387 for (i = 0; i < num_sack_blks; i++) {
4388 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts);
4390 rack->r_wanted_output++;
4392 sack_changed += acked;
4397 /* Something changed, cancel the rack timer */
4398 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4400 if ((sack_changed) && (!IN_RECOVERY(tp->t_flags))) {
4402 * Ok we have a high probability that we need to go into
4403 * recovery since we have data sack'd
4405 struct rack_sendmap *rsm;
4408 tsused = tcp_ts_getticks();
4409 rsm = tcp_rack_output(tp, rack, tsused);
4411 /* Enter recovery */
4412 rack->r_ctl.rc_rsm_start = rsm->r_start;
4413 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
4414 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
4415 entered_recovery = 1;
4416 rack_cong_signal(tp, NULL, CC_NDUPACK);
4418 * When we enter recovery we need to assure we send
4421 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
4422 rack->r_timer_override = 1;
4425 if (IN_RECOVERY(tp->t_flags) && (entered_recovery == 0)) {
4426 /* Deal with changed and PRR here (in recovery only) */
4427 uint32_t pipe, snd_una;
4429 rack->r_ctl.rc_prr_delivered += changed;
4430 /* Compute prr_sndcnt */
4431 if (SEQ_GT(tp->snd_una, th_ack)) {
4432 snd_una = tp->snd_una;
4436 pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt;
4437 if (pipe > tp->snd_ssthresh) {
4440 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh;
4441 if (rack->r_ctl.rc_prr_recovery_fs > 0)
4442 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs;
4444 rack->r_ctl.rc_prr_sndcnt = 0;
4448 if (sndcnt > (long)rack->r_ctl.rc_prr_out)
4449 sndcnt -= rack->r_ctl.rc_prr_out;
4452 rack->r_ctl.rc_prr_sndcnt = sndcnt;
4456 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out)
4457 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out);
4460 if (changed > limit)
4462 limit += tp->t_maxseg;
4463 if (tp->snd_ssthresh > pipe) {
4464 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit);
4466 rack->r_ctl.rc_prr_sndcnt = min(0, limit);
4469 if (rack->r_ctl.rc_prr_sndcnt >= tp->t_maxseg) {
4470 rack->r_timer_override = 1;
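/*
 * Illustrative sketch of the PRR arithmetic above (in the spirit of
 * RFC 6937), pulled into a hypothetical stand-alone helper. While pipe
 * exceeds ssthresh, sending is proportional to data delivered, scaled
 * by ssthresh/RecoverFS; at or below ssthresh it grows slow-start
 * style, bounded by "limit":
 */
#if 0
static uint32_t
prr_sndcnt(uint32_t delivered, uint32_t out, uint32_t recover_fs,
    uint32_t pipe, uint32_t ssthresh, uint32_t limit)
{
	long sndcnt = 0;

	if (pipe > ssthresh) {
		/* Proportional phase. */
		sndcnt = (long)delivered * ssthresh;
		if (recover_fs > 0)
			sndcnt /= (long)recover_fs;
		if (sndcnt > (long)out)
			sndcnt -= out;
		else
			sndcnt = 0;
	} else {
		/* Catch-up phase, never exceeding the computed limit. */
		sndcnt = min((ssthresh > pipe) ? (ssthresh - pipe) : 0, limit);
	}
	return ((uint32_t)sndcnt);
}
#endif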
4476 * Return value of 1, we do not need to call rack_process_data().
4477 * return value of 0, rack_process_data can be called.
4478 * For ret_val if its 0 the TCP is locked, if its non-zero
4479 * its unlocked and probably unsafe to touch the TCB.
4482 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
4483 struct tcpcb *tp, struct tcpopt *to,
4484 int32_t * ti_locked, uint32_t tiwin, int32_t tlen,
4485 int32_t * ofia, int32_t thflags, int32_t * ret_val)
4487 int32_t ourfinisacked = 0;
4488 int32_t nsegs, acked_amount;
4491 struct tcp_rack *rack;
4492 int32_t recovery = 0;
4494 rack = (struct tcp_rack *)tp->t_fb_ptr;
4495 if (SEQ_GT(th->th_ack, tp->snd_max)) {
4496 rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, ret_val);
4499 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
4500 rack_log_ack(tp, to, th);
4502 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
4504 * Old ack, behind (or duplicate to) the last one rcv'd
4505 * Note: Should mark reordering is occurring! We should also
4506 * look for sack blocks arriving e.g. ack 1, 4-4 then ack 1,
4507 * 3-3, 4-4 would be reordering. As well as ack 1, 3-3 <no
4513 * If we reach this point, ACK is not a duplicate, i.e., it ACKs
4514 * something we sent.
4516 if (tp->t_flags & TF_NEEDSYN) {
4518 * T/TCP: Connection was half-synchronized, and our SYN has
4519 * been ACK'd (so connection is now fully synchronized). Go
4520 * to non-starred state, increment snd_una for ACK of SYN,
4521 * and check if we can do window scaling.
4523 tp->t_flags &= ~TF_NEEDSYN;
4525 /* Do window scaling? */
4526 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
4527 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
4528 tp->rcv_scale = tp->request_r_scale;
4529 /* Send window already scaled. */
4532 nsegs = max(1, m->m_pkthdr.lro_nsegs);
4533 INP_WLOCK_ASSERT(tp->t_inpcb);
4535 acked = BYTES_THIS_ACK(tp, th);
4536 TCPSTAT_ADD(tcps_rcvackpack, nsegs);
4537 TCPSTAT_ADD(tcps_rcvackbyte, acked);
4540 * If we just performed our first retransmit, and the ACK arrives
4541 * within our recovery window, then it was a mistake to do the
4542 * retransmit in the first place. Recover our original cwnd and
4543 * ssthresh, and proceed to transmit where we left off.
4545 if (tp->t_flags & TF_PREVVALID) {
4546 tp->t_flags &= ~TF_PREVVALID;
4547 if (tp->t_rxtshift == 1 &&
4548 (int)(ticks - tp->t_badrxtwin) < 0)
4549 rack_cong_signal(tp, th, CC_RTO_ERR);
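/*
 * Illustrative note: t_badrxtwin marks the end of the "bad
 * retransmit" window; "(int)(ticks - tp->t_badrxtwin) < 0" is the
 * wrap-safe form of "ticks is still before t_badrxtwin". An ACK
 * landing inside that window implies the lone retransmit was
 * spurious, so CC_RTO_ERR restores the pre-RTO cwnd/ssthresh.
 */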
4552 * If we have a timestamp reply, update smoothed round trip time. If
4553 * no timestamp is present but transmit timer is running and timed
4554 * sequence number was acked, update smoothed round trip time. Since
4555 * we now have an rtt measurement, cancel the timer backoff (cf.,
4556 * Phil Karn's retransmit alg.). Recompute the initial retransmit
4559 * Some boxes send broken timestamp replies during the SYN+ACK
4560 * phase, ignore timestamps of 0 or we could calculate a huge RTT
4561 * and blow up the retransmit timer.
4564 * If all outstanding data is acked, stop retransmit timer and
4565 * remember to restart (more output or persist). If there is more
4566 * data to be acked, restart retransmit timer, using current
4567 * (possibly backed-off) value.
4569 if (th->th_ack == tp->snd_max) {
4570 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4571 rack->r_wanted_output++;
4574 * If no data (only SYN) was ACK'd, skip rest of ACK processing.
4578 *ofia = ourfinisacked;
4581 if (rack->r_ctl.rc_early_recovery) {
4582 if (IN_FASTRECOVERY(tp->t_flags)) {
4583 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
4584 tcp_rack_partialack(tp, th);
4586 rack_post_recovery(tp, th);
4592 * Let the congestion control algorithm update congestion control
4593 * related information. This typically means increasing the
4594 * congestion window.
4596 rack_ack_received(tp, rack, th, nsegs, CC_ACK, recovery);
4597 SOCKBUF_LOCK(&so->so_snd);
4598 acked_amount = min(acked, (int)sbavail(&so->so_snd));
4599 tp->snd_wnd -= acked_amount;
4600 mfree = sbcut_locked(&so->so_snd, acked_amount);
4601 if ((sbused(&so->so_snd) == 0) &&
4602 (acked > acked_amount) &&
4603 (tp->t_state >= TCPS_FIN_WAIT_1)) {
4606 /* NB: sowwakeup_locked() does an implicit unlock. */
4607 sowwakeup_locked(so);
4609 if (rack->r_ctl.rc_early_recovery == 0) {
4610 if (IN_FASTRECOVERY(tp->t_flags)) {
4611 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
4612 tcp_rack_partialack(tp, th);
4614 rack_post_recovery(tp, th);
4618 tp->snd_una = th->th_ack;
4619 if (SEQ_GT(tp->snd_una, tp->snd_recover))
4620 tp->snd_recover = tp->snd_una;
4622 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
4623 tp->snd_nxt = tp->snd_una;
4625 if (tp->snd_una == tp->snd_max) {
4626 /* Nothing left outstanding */
4627 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
4629 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4630 /* Set need output so persist might get set */
4631 rack->r_wanted_output++;
4632 if (rack_use_sack_filter)
4633 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
4634 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
4635 (sbavail(&so->so_snd) == 0) &&
4636 (tp->t_flags2 & TF2_DROP_AF_DATA)) {
4638 * The socket was gone and the
4639 * peer sent data, time to
4644 rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_UNLIMITED, tlen);
4649 *ofia = ourfinisacked;
4655 * Return value of 1, the TCB is unlocked and most
4656 * likely gone, return value of 0, the TCP is still
4660 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
4661 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
4662 int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
4665 * Update window information. Don't look at window if no ACK: TAC's
4666 * send garbage on first SYN.
4670 struct tcp_rack *rack;
4672 rack = (struct tcp_rack *)tp->t_fb_ptr;
4673 INP_WLOCK_ASSERT(tp->t_inpcb);
4675 nsegs = max(1, m->m_pkthdr.lro_nsegs);
4676 if ((thflags & TH_ACK) &&
4677 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
4678 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
4679 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
4680 /* keep track of pure window updates */
4682 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
4683 TCPSTAT_INC(tcps_rcvwinupd);
4684 tp->snd_wnd = tiwin;
4685 tp->snd_wl1 = th->th_seq;
4686 tp->snd_wl2 = th->th_ack;
4687 if (tp->snd_wnd > tp->max_sndwnd)
4688 tp->max_sndwnd = tp->snd_wnd;
4689 rack->r_wanted_output++;
4690 } else if (thflags & TH_ACK) {
4691 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) {
4692 tp->snd_wnd = tiwin;
4693 tp->snd_wl1 = th->th_seq;
4694 tp->snd_wl2 = th->th_ack;
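/*
 * Illustrative sketch (hypothetical, not in the stack): the first
 * branch's acceptance test above as a stand-alone predicate -- take
 * the advertised window when the segment is newer (wl1 < seq),
 * equally new but acking more (wl2 < ack), or identical but opening
 * the window (tiwin > snd_wnd):
 */
#if 0
static int
win_update_ok(tcp_seq wl1, tcp_seq wl2, tcp_seq seq, tcp_seq ack,
    uint32_t tiwin, uint32_t snd_wnd)
{
	return (SEQ_LT(wl1, seq) ||
	    (wl1 == seq && (SEQ_LT(wl2, ack) ||
	    (wl2 == ack && tiwin > snd_wnd))));
}
#endif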
4697 /* Was persist timer active and now we have window space? */
4698 if ((rack->rc_in_persist != 0) && tp->snd_wnd) {
4699 rack_exit_persist(tp, rack);
4700 tp->snd_nxt = tp->snd_max;
4701 /* Make sure we output to start the timer */
4702 rack->r_wanted_output++;
4705 * Process segments with URG.
4707 if ((thflags & TH_URG) && th->th_urp &&
4708 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
4710 * This is a kludge, but if we receive and accept random
4711 * urgent pointers, we'll crash in soreceive. It's hard to
4712 * imagine someone actually wanting to send this much urgent
4715 SOCKBUF_LOCK(&so->so_rcv);
4716 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) {
4717 th->th_urp = 0; /* XXX */
4718 thflags &= ~TH_URG; /* XXX */
4719 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
4720 goto dodata; /* XXX */
4723 * If this segment advances the known urgent pointer, then
4724 * mark the data stream. This should not happen in
4725 * CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since a
4726 * FIN has been received from the remote side. In these
4727 * states we ignore the URG.
4729 * According to RFC961 (Assigned Protocols), the urgent
4730 * pointer points to the last octet of urgent data. We
4731 * continue, however, to consider it to indicate the first
4732 * octet of data past the urgent section as the original
4733 * spec states (in one of two places).
4735 if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) {
4736 tp->rcv_up = th->th_seq + th->th_urp;
4737 so->so_oobmark = sbavail(&so->so_rcv) +
4738 (tp->rcv_up - tp->rcv_nxt) - 1;
4739 if (so->so_oobmark == 0)
4740 so->so_rcv.sb_state |= SBS_RCVATMARK;
4742 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
4744 SOCKBUF_UNLOCK(&so->so_rcv);
4746 * Remove out of band data so it doesn't get presented to the user.
4747 * This can happen independent of advancing the URG pointer,
4748 * but if two URG's are pending at once, some out-of-band
4749 * data may creep in... ick.
4751 if (th->th_urp <= (uint32_t) tlen &&
4752 !(so->so_options & SO_OOBINLINE)) {
4753 /* hdr drop is delayed */
4754 tcp_pulloutofband(so, th, m, drop_hdrlen);
4758 * If no out of band data is expected, pull receive urgent
4759 * pointer along with the receive window.
4761 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
4762 tp->rcv_up = tp->rcv_nxt;
4765 INP_WLOCK_ASSERT(tp->t_inpcb);
4768 * Process the segment text, merging it into the TCP sequencing
4769 * queue, and arranging for acknowledgment of receipt if necessary.
4770 * This process logically involves adjusting tp->rcv_wnd as data is
4771 * presented to the user (this happens in tcp_usrreq.c, case
4772 * PRU_RCVD). If a FIN has already been received on this connection
4773 * then we just ignore the text.
4775 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
4776 IS_FASTOPEN(tp->t_flags));
4777 if ((tlen || (thflags & TH_FIN) || tfo_syn) &&
4778 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
4779 tcp_seq save_start = th->th_seq;
4781 m_adj(m, drop_hdrlen); /* delayed header drop */
4783 * Insert segment which includes th into TCP reassembly
4784 * queue with control block tp. Set thflags to whether
4785 * reassembly now includes a segment with FIN. This handles
4786 * the common case inline (segment is the next to be
4787 * received on an established connection, and the queue is
4788 * empty), avoiding linkage into and removal from the queue
4789 * and repetition of various conversions. Set DELACK for
4790 * segments received in order, but ack immediately when
4791 * segments are out of order (so fast retransmit can work).
4793 if (th->th_seq == tp->rcv_nxt &&
4794 LIST_EMPTY(&tp->t_segq) &&
4795 (TCPS_HAVEESTABLISHED(tp->t_state) ||
4797 if (DELAY_ACK(tp, tlen) || tfo_syn) {
4798 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4799 tp->t_flags |= TF_DELACK;
4801 rack->r_wanted_output++;
4802 tp->t_flags |= TF_ACKNOW;
4804 tp->rcv_nxt += tlen;
4805 thflags = th->th_flags & TH_FIN;
4806 TCPSTAT_ADD(tcps_rcvpack, nsegs);
4807 TCPSTAT_ADD(tcps_rcvbyte, tlen);
4808 SOCKBUF_LOCK(&so->so_rcv);
4809 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
4812 sbappendstream_locked(&so->so_rcv, m, 0);
4813 /* NB: sorwakeup_locked() does an implicit unlock. */
4814 sorwakeup_locked(so);
4817 * XXX: Due to the header drop above "th" is
4818 * theoretically invalid by now. Fortunately
4819 * m_adj() doesn't actually free any mbufs when
4820 * trimming from the head.
4822 thflags = tcp_reass(tp, th, &tlen, m);
4823 tp->t_flags |= TF_ACKNOW;
4826 tcp_update_sack_list(tp, save_start, save_start + tlen);
4833 * If FIN is received ACK the FIN and let the user know that the
4834 * connection is closing.
4836 if (thflags & TH_FIN) {
4837 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
4840 * If connection is half-synchronized (ie NEEDSYN
4841 * flag on) then delay ACK, so it may be piggybacked
4842 * when SYN is sent. Otherwise, since we received a
4843 * FIN then no more input can be expected, send ACK
4846 if (tp->t_flags & TF_NEEDSYN) {
4847 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4848 tp->t_flags |= TF_DELACK;
4850 tp->t_flags |= TF_ACKNOW;
4854 switch (tp->t_state) {
4857 * In SYN_RECEIVED and ESTABLISHED STATES enter the
4860 case TCPS_SYN_RECEIVED:
4861 tp->t_starttime = ticks;
4863 case TCPS_ESTABLISHED:
4864 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4865 tcp_state_change(tp, TCPS_CLOSE_WAIT);
4869 * If still in FIN_WAIT_1 STATE FIN has not been
4870 * acked so enter the CLOSING state.
4872 case TCPS_FIN_WAIT_1:
4873 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4874 tcp_state_change(tp, TCPS_CLOSING);
4878 * In FIN_WAIT_2 state enter the TIME_WAIT state,
4879 * starting the time-wait timer, turning off the
4880 * other standard timers.
4882 case TCPS_FIN_WAIT_2:
4883 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4884 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
4885 KASSERT(*ti_locked == TI_RLOCKED, ("%s: dodata "
4886 "TCP_FIN_WAIT_2 ti_locked: %d", __func__,
4889 *ti_locked = TI_UNLOCKED;
4890 INP_INFO_RUNLOCK(&V_tcbinfo);
4894 if (*ti_locked == TI_RLOCKED) {
4895 INP_INFO_RUNLOCK(&V_tcbinfo);
4896 *ti_locked = TI_UNLOCKED;
4899 * Return any desired output.
4901 if ((tp->t_flags & TF_ACKNOW) || (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) {
4902 rack->r_wanted_output++;
4904 KASSERT(*ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d",
4905 __func__, *ti_locked));
4906 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
4907 INP_WLOCK_ASSERT(tp->t_inpcb);
4912 * Here nothing is really faster, it's just that we
4913 * have broken out the fast-data path also just like
4917 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so,
4918 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
4919 int32_t * ti_locked, uint32_t tiwin, int32_t nxt_pkt)
4922 int32_t newsize = 0; /* automatic sockbuf scaling */
4923 struct tcp_rack *rack;
4926 * The size of tcp_saveipgen must be the size of the max ip header,
4929 u_char tcp_saveipgen[IP6_HDR_LEN];
4930 struct tcphdr tcp_savetcp;
4935 * If last ACK falls within this segment's sequence numbers, record
4936 * the timestamp. NOTE that the test is modified according to the
4937 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
4939 if (__predict_false(th->th_seq != tp->rcv_nxt)) {
4942 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
4945 if (tiwin && tiwin != tp->snd_wnd) {
4948 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) {
4951 if (__predict_false((to->to_flags & TOF_TS) &&
4952 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) {
4955 if (__predict_false((th->th_ack != tp->snd_una))) {
4958 if (__predict_false(tlen > sbspace(&so->so_rcv))) {
4961 if ((to->to_flags & TOF_TS) != 0 &&
4962 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
4963 tp->ts_recent_age = tcp_ts_getticks();
4964 tp->ts_recent = to->to_tsval;
4966 rack = (struct tcp_rack *)tp->t_fb_ptr;
4968 * This is a pure, in-sequence data packet with nothing on the
4969 * reassembly queue and we have enough buffer space to take it.
4971 if (*ti_locked == TI_RLOCKED) {
4972 INP_INFO_RUNLOCK(&V_tcbinfo);
4973 *ti_locked = TI_UNLOCKED;
4975 nsegs = max(1, m->m_pkthdr.lro_nsegs);
4978 /* Clean receiver SACK report if present */
4979 if (tp->rcv_numsacks)
4980 tcp_clean_sackreport(tp);
4981 TCPSTAT_INC(tcps_preddat);
4982 tp->rcv_nxt += tlen;
4984 * Pull snd_wl1 up to prevent seq wrap relative to th_seq.
4986 tp->snd_wl1 = th->th_seq;
4988 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt.
4990 tp->rcv_up = tp->rcv_nxt;
4991 TCPSTAT_ADD(tcps_rcvpack, nsegs);
4992 TCPSTAT_ADD(tcps_rcvbyte, tlen);
4994 if (so->so_options & SO_DEBUG)
4995 tcp_trace(TA_INPUT, ostate, tp,
4996 (void *)tcp_saveipgen, &tcp_savetcp, 0);
4998 newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
5000 /* Add data to socket buffer. */
5001 SOCKBUF_LOCK(&so->so_rcv);
5002 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5006 * Set new socket buffer size. Give up when limit is
5010 if (!sbreserve_locked(&so->so_rcv,
5012 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
5013 m_adj(m, drop_hdrlen); /* delayed header drop */
5014 sbappendstream_locked(&so->so_rcv, m, 0);
5015 rack_calc_rwin(so, tp);
5017 /* NB: sorwakeup_locked() does an implicit unlock. */
5018 sorwakeup_locked(so);
5019 if (DELAY_ACK(tp, tlen)) {
5020 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
5021 tp->t_flags |= TF_DELACK;
5023 tp->t_flags |= TF_ACKNOW;
5024 rack->r_wanted_output++;
5026 if ((tp->snd_una == tp->snd_max) && rack_use_sack_filter)
5027 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
5032 * This subfunction is used to try to highly optimize the
5033 * fast path. We again allow window updates that are
5034 * in sequence to remain in the fast-path. We also add
5035 * in the __predict's to attempt to help the compiler.
5036 * Note that if we return a 0, then we can *not* process
5037 * it and the caller should push the packet into the
5041 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
5042 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5043 int32_t * ti_locked, uint32_t tiwin, int32_t nxt_pkt, uint32_t cts)
5050 * The size of tcp_saveipgen must be the size of the max ip header,
5053 u_char tcp_saveipgen[IP6_HDR_LEN];
5054 struct tcphdr tcp_savetcp;
5058 struct tcp_rack *rack;
5060 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
5061 /* Old ack, behind (or duplicate to) the last one rcv'd */
5064 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) {
5065 /* Above what we have sent? */
5068 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
5069 /* We are retransmitting */
5072 if (__predict_false(tiwin == 0)) {
5076 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) {
5077 /* We need a SYN or a FIN, unlikely.. */
5080 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) {
5081 /* Timestamp is behind ... old ack with seq wrap? */
5084 if (__predict_false(IN_RECOVERY(tp->t_flags))) {
5085 /* Still recovering */
5088 rack = (struct tcp_rack *)tp->t_fb_ptr;
5089 if (rack->r_ctl.rc_sacked) {
5090 /* We have sack holes on our scoreboard */
5093 /* Ok if we reach here, we can process a fast-ack */
5094 nsegs = max(1, m->m_pkthdr.lro_nsegs);
5095 rack_log_ack(tp, to, th);
5096 /* Did the window get updated? */
5097 if (tiwin != tp->snd_wnd) {
5098 tp->snd_wnd = tiwin;
5099 tp->snd_wl1 = th->th_seq;
5100 if (tp->snd_wnd > tp->max_sndwnd)
5101 tp->max_sndwnd = tp->snd_wnd;
5103 if ((rack->rc_in_persist != 0) && (tp->snd_wnd >= tp->t_maxseg)) {
5104 rack_exit_persist(tp, rack);
5107 * If last ACK falls within this segment's sequence numbers, record
5108 * the timestamp. NOTE that the test is modified according to the
5109 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
5111 if ((to->to_flags & TOF_TS) != 0 &&
5112 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
5113 tp->ts_recent_age = tcp_ts_getticks();
5114 tp->ts_recent = to->to_tsval;
5117 * This is a pure ack for outstanding data.
5119 if (*ti_locked == TI_RLOCKED) {
5120 INP_INFO_RUNLOCK(&V_tcbinfo);
5121 *ti_locked = TI_UNLOCKED;
5123 TCPSTAT_INC(tcps_predack);
5126 * "bad retransmit" recovery.
5128 if (tp->t_flags & TF_PREVVALID) {
5129 tp->t_flags &= ~TF_PREVVALID;
5130 if (tp->t_rxtshift == 1 &&
5131 (int)(ticks - tp->t_badrxtwin) < 0)
5132 rack_cong_signal(tp, th, CC_RTO_ERR);
5135 * Recalculate the transmit timer / rtt.
5137 * Some boxes send broken timestamp replies during the SYN+ACK
5138 * phase, ignore timestamps of 0 or we could calculate a huge RTT
5139 * and blow up the retransmit timer.
5141 acked = BYTES_THIS_ACK(tp, th);
5144 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
5145 hhook_run_tcp_est_in(tp, th, to);
5148 TCPSTAT_ADD(tcps_rcvackpack, nsegs);
5149 TCPSTAT_ADD(tcps_rcvackbyte, acked);
5150 sbdrop(&so->so_snd, acked);
5152 * Let the congestion control algorithm update congestion control
5153 * related information. This typically means increasing the
5154 * congestion window.
5156 rack_ack_received(tp, rack, th, nsegs, CC_ACK, 0);
5158 tp->snd_una = th->th_ack;
5160 * Pull snd_wl2 up to prevent seq wrap relative to th_ack.
5162 tp->snd_wl2 = th->th_ack;
5165 /* ND6_HINT(tp); *//* Some progress has been made. */
5168 * If all outstanding data are acked, stop retransmit timer,
5169 * otherwise restart timer using current (possibly backed-off)
5170 * value. If process is waiting for space, wakeup/selwakeup/signal.
5171 * If data are ready to send, let tcp_output decide between more
5172 * output or persist.
5175 if (so->so_options & SO_DEBUG)
5176 tcp_trace(TA_INPUT, ostate, tp,
5177 (void *)tcp_saveipgen,
5180 if (tp->snd_una == tp->snd_max) {
5181 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
5183 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
5185 /* Wake up the socket if we have room to write more */
5187 if (sbavail(&so->so_snd)) {
5188 rack->r_wanted_output++;
5194 * Return value of 1, the TCB is unlocked and most
5195 * likely gone, return value of 0, the TCP is still
5199 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
5200 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5201 int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5203 int32_t ret_val = 0;
5205 int32_t ourfinisacked = 0;
5207 rack_calc_rwin(so, tp);
5209 * If the state is SYN_SENT: if seg contains an ACK, but not for our
5210 * SYN, drop the input. if seg contains a RST, then drop the
5211 * connection. if seg does not contain SYN, then drop it. Otherwise
5212 * this is an acceptable SYN segment initialize tp->rcv_nxt and
5213 * tp->irs if seg contains ack then advance tp->snd_una if seg
5214 * contains an ECE and ECN support is enabled, the stream is ECN
5215 * capable. if SYN has been acked change to ESTABLISHED else
5216 * SYN_RCVD state arrange for segment to be acked (eventually)
5217 * continue processing rest of data/controls, beginning with URG
5219 if ((thflags & TH_ACK) &&
5220 (SEQ_LEQ(th->th_ack, tp->iss) ||
5221 SEQ_GT(th->th_ack, tp->snd_max))) {
5222 rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
5225 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
5226 TCP_PROBE5(connect__refused, NULL, tp,
5227 mtod(m, const char *), tp, th);
5228 tp = tcp_drop(tp, ECONNREFUSED);
5229 rack_do_drop(m, tp, ti_locked);
5232 if (thflags & TH_RST) {
5233 rack_do_drop(m, tp, ti_locked);
5236 if (!(thflags & TH_SYN)) {
5237 rack_do_drop(m, tp, ti_locked);
5240 tp->irs = th->th_seq;
5242 if (thflags & TH_ACK) {
5243 int tfo_partial = 0;
5245 TCPSTAT_INC(tcps_connects);
5248 mac_socketpeer_set_from_mbuf(m, so);
5250 /* Do window scaling on this connection? */
5251 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
5252 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
5253 tp->rcv_scale = tp->request_r_scale;
5255 tp->rcv_adv += min(tp->rcv_wnd,
5256 TCP_MAXWIN << tp->rcv_scale);
5258 * If not all the data that was sent in the TFO SYN
5259 * has been acked, resend the remainder right away.
5261 if (IS_FASTOPEN(tp->t_flags) &&
5262 (tp->snd_una != tp->snd_max)) {
5263 tp->snd_nxt = th->th_ack;
5267 * If there's data, delay ACK; if there's also a FIN, ACKNOW
5268 * will be turned on later.
5270 if (DELAY_ACK(tp, tlen) && tlen != 0 && (tfo_partial == 0)) {
5271 rack_timer_cancel(tp, (struct tcp_rack *)tp->t_fb_ptr,
5272 ((struct tcp_rack *)tp->t_fb_ptr)->r_ctl.rc_rcvtime, __LINE__);
5273 tp->t_flags |= TF_DELACK;
5275 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++;
5276 tp->t_flags |= TF_ACKNOW;
5279 if ((thflags & TH_ECE) && V_tcp_do_ecn) {
5280 tp->t_flags |= TF_ECN_PERMIT;
5281 TCPSTAT_INC(tcps_ecn_shs);
5283 if (SEQ_GT(th->th_ack, tp->snd_una)) {
5285 * We advance snd_una for the
5286 * fast open case. If th_ack is
5287 * acknowledging data beyond
5288 * snd_una we can't just call
5289 * ack-processing since the
5290 * data stream in our send-map
5291 * will start at snd_una + 1 (one
5292 * beyond the SYN). If it's just
5293 * equal we don't need to do that
5294 * and there is no send_map.
5299 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions:
5300 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1
5302 tp->t_starttime = ticks;
5303 if (tp->t_flags & TF_NEEDFIN) {
5304 tcp_state_change(tp, TCPS_FIN_WAIT_1);
5305 tp->t_flags &= ~TF_NEEDFIN;
5308 tcp_state_change(tp, TCPS_ESTABLISHED);
5309 TCP_PROBE5(connect__established, NULL, tp,
5310 mtod(m, const char *), tp, th);
5315 * Received initial SYN in SYN-SENT[*] state => simultaneous
5316 * open. If segment contains CC option and there is a
5317 * cached CC, apply TAO test. If it succeeds, connection is
5318 * half-synchronized. Otherwise, do 3-way handshake:
5319 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If
5320 * there was no CC option, clear cached CC value.
5322 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
5323 tcp_state_change(tp, TCPS_SYN_RECEIVED);
5325 KASSERT(*ti_locked == TI_RLOCKED, ("%s: trimthenstep6: "
5326 "ti_locked %d", __func__, *ti_locked));
5327 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
5328 INP_WLOCK_ASSERT(tp->t_inpcb);
5330 * Advance th->th_seq to correspond to first data byte. If data,
5331 * trim to stay within window, dropping FIN if necessary.
5334 if (tlen > tp->rcv_wnd) {
5335 todrop = tlen - tp->rcv_wnd;
5339 TCPSTAT_INC(tcps_rcvpackafterwin);
5340 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
5342 tp->snd_wl1 = th->th_seq - 1;
5343 tp->rcv_up = th->th_seq;
5345 * Client side of transaction: already sent SYN and data. If the
5346 * remote host used T/TCP to validate the SYN, our data will be
5347 * ACK'd; if so, enter normal data segment processing in the middle
5348 * of step 5, ack processing. Otherwise, goto step 6.
5350 if (thflags & TH_ACK) {
5351 if (rack_process_ack(m, th, so, tp, to, ti_locked, tiwin, tlen, &ourfinisacked, thflags, &ret_val))
5353 /* We may have changed to FIN_WAIT_1 above */
5354 if (tp->t_state == TCPS_FIN_WAIT_1) {
5356 * In FIN_WAIT_1 STATE in addition to the processing
5357 * for the ESTABLISHED state if our FIN is now
5358 * acknowledged then enter FIN_WAIT_2.
5360 if (ourfinisacked) {
5362 * If we can't receive any more data, then
5363 * closing user can proceed. Starting the
5364 * timer is contrary to the specification,
5365 * but if we don't get a FIN we'll hang
5368 * XXXjl: we should release the tp also, and
5369 * use a compressed state.
5371 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5372 soisdisconnected(so);
5373 tcp_timer_activate(tp, TT_2MSL,
5374 (tcp_fast_finwait2_recycle ?
5375 tcp_finwait2_timeout :
5378 tcp_state_change(tp, TCPS_FIN_WAIT_2);
5382 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5383 ti_locked, tiwin, thflags, nxt_pkt));
5387 * Return value of 1, the TCB is unlocked and most
5388 * likely gone, return value of 0, the TCP is still
5392 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
5393 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5394 int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5396 int32_t ret_val = 0;
5397 int32_t ourfinisacked = 0;
5399 rack_calc_rwin(so, tp);
5401 if ((thflags & TH_ACK) &&
5402 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
5403 SEQ_GT(th->th_ack, tp->snd_max))) {
5404 rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
5407 if (IS_FASTOPEN(tp->t_flags)) {
5409 * When a TFO connection is in SYN_RECEIVED, the
5410 * only valid packets are the initial SYN, a
5411 * retransmit/copy of the initial SYN (possibly with
5412 * a subset of the original data), a valid ACK, a
5415 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
5416 rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
5418 } else if (thflags & TH_SYN) {
5419 /* non-initial SYN is ignored */
5420 struct tcp_rack *rack;
5422 rack = (struct tcp_rack *)tp->t_fb_ptr;
5423 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) ||
5424 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) ||
5425 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) {
5426 rack_do_drop(m, NULL, ti_locked);
5429 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) {
5430 rack_do_drop(m, NULL, ti_locked);
5434 if (thflags & TH_RST)
5435 return (rack_process_rst(m, th, so, tp, ti_locked));
5437 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
5438 * synchronized state.
5440 if (thflags & TH_SYN) {
5441 rack_challenge_ack(m, th, tp, ti_locked, &ret_val);
5445 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
5446 * it's less than ts_recent, drop it.
5448 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
5449 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
5450 if (rack_ts_check(m, th, tp, ti_locked, tlen, thflags, &ret_val))
5454 * In the SYN-RECEIVED state, validate that the packet belongs to
5455 * this connection before trimming the data to fit the receive
5456 * window. Check the sequence number versus IRS since we know the
5457 * sequence numbers haven't wrapped. This is a partial fix for the
5458 * "LAND" DoS attack.
5460 if (SEQ_LT(th->th_seq, tp->irs)) {
5461 rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
5464 if (rack_drop_checks(to, m, th, tp, &tlen, ti_locked, &thflags, &drop_hdrlen, &ret_val)) {
5468 * If last ACK falls within this segment's sequence numbers, record
5469 * its timestamp. NOTE: 1) That the test incorporates suggestions
5470 * from the latest proposal of the tcplw@cray.com list (Braden
5471 * 1993/04/26). 2) That updating only on newer timestamps interferes
5472 * with our earlier PAWS tests, so this check should be solely
5473 * predicated on the sequence space of this segment. 3) That we
5474 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
5475 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
5476 * SEG.Len, This modified check allows us to overcome RFC1323's
5477 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
5478 * p.869. In such cases, we can still calculate the RTT correctly
5479 * when RCV.NXT == Last.ACK.Sent.
5481 if ((to->to_flags & TOF_TS) != 0 &&
5482 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
5483 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
5484 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
5485 tp->ts_recent_age = tcp_ts_getticks();
5486 tp->ts_recent = to->to_tsval;
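/*
 * Worked example (illustrative): with Last.ACK.Sent == 100 and a
 * segment where SEG.SEQ == 90 and SEG.Len == 10, the modified test
 * 100 <= 90 + 10 records the timestamp, while RFC1323's strict "<"
 * would not -- exactly the RCV.NXT == Last.ACK.Sent case the
 * comment above describes.
 */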
5489 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
5490 * is on (half-synchronized state), then queue data for later
5491 * processing; else drop segment and return.
5493 if ((thflags & TH_ACK) == 0) {
5494 if (IS_FASTOPEN(tp->t_flags)) {
5495 tp->snd_wnd = tiwin;
5498 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5499 ti_locked, tiwin, thflags, nxt_pkt));
5501 TCPSTAT_INC(tcps_connects);
5503 /* Do window scaling? */
5504 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
5505 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
5506 tp->rcv_scale = tp->request_r_scale;
5507 tp->snd_wnd = tiwin;
5510 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* ->
5513 tp->t_starttime = ticks;
5514 if (tp->t_flags & TF_NEEDFIN) {
5515 tcp_state_change(tp, TCPS_FIN_WAIT_1);
5516 tp->t_flags &= ~TF_NEEDFIN;
5518 tcp_state_change(tp, TCPS_ESTABLISHED);
5519 TCP_PROBE5(accept__established, NULL, tp,
5520 mtod(m, const char *), tp, th);
5521 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) {
5522 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
5523 tp->t_tfo_pending = NULL;
5526 * Account for the ACK of our SYN prior to regular
5527 * ACK processing below.
5532 * TFO connections call cc_conn_init() during SYN
5533 * processing. Calling it again here for such connections
5534 * is not harmless as it would undo the snd_cwnd reduction
5535 * that occurs when a TFO SYN|ACK is retransmitted.
5537 if (!IS_FASTOPEN(tp->t_flags))
5541 * If segment contains data or ACK, will call tcp_reass() later; if
5542 * not, do so now to pass queued data to user.
5544 if (tlen == 0 && (thflags & TH_FIN) == 0)
5545 (void)tcp_reass(tp, (struct tcphdr *)0, 0,
5547 tp->snd_wl1 = th->th_seq - 1;
5548 if (rack_process_ack(m, th, so, tp, to, ti_locked, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
5551 if (tp->t_state == TCPS_FIN_WAIT_1) {
5552 /* We could have gone to FIN_WAIT_1 (or EST) above */
5554 * In FIN_WAIT_1 STATE in addition to the processing for the
5555 * ESTABLISHED state if our FIN is now acknowledged then
5558 if (ourfinisacked) {
5560 * If we can't receive any more data, then closing
5561 * user can proceed. Starting the timer is contrary
5562 * to the specification, but if we don't get a FIN
5563 * we'll hang forever.
5565 * XXXjl: we should release the tp also, and use a
5568 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5569 soisdisconnected(so);
5570 tcp_timer_activate(tp, TT_2MSL,
5571 (tcp_fast_finwait2_recycle ?
5572 tcp_finwait2_timeout :
5575 tcp_state_change(tp, TCPS_FIN_WAIT_2);
5578 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5579 ti_locked, tiwin, thflags, nxt_pkt));
5583 * Return value of 1, the TCB is unlocked and most
5584 * likely gone, return value of 0, the TCP is still
5588 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
5589 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5590 int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5592 int32_t ret_val = 0;
5595 * Header prediction: check for the two common cases of a
5596 * uni-directional data xfer. If the packet has no control flags,
5597 * is in-sequence, the window didn't change and we're not
5598 * retransmitting, it's a candidate. If the length is zero and the
5599 * ack moved forward, we're the sender side of the xfer. Just free
5600 * the data acked & wake any higher level process that was blocked
5601 * waiting for space. If the length is non-zero and the ack didn't
5602 * move, we're the receiver side. If we're getting packets in-order
5603 * (the reassembly queue is empty), add the data to the socket
5604 * buffer and note that we need a delayed ack. Make sure that the
5605 * hidden state-flags are also off. Since we check for
5606 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN.
5608 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) &&
5609 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_URG | TH_ACK)) == TH_ACK) &&
5610 __predict_true(LIST_EMPTY(&tp->t_segq)) &&
5611 __predict_true(th->th_seq == tp->rcv_nxt)) {
5612 struct tcp_rack *rack;
5614 rack = (struct tcp_rack *)tp->t_fb_ptr;
5616 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen,
5617 ti_locked, tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) {
5621 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen,
5622 ti_locked, tiwin, nxt_pkt)) {
5627 rack_calc_rwin(so, tp);
5629 if (thflags & TH_RST)
5630 return (rack_process_rst(m, th, so, tp, ti_locked));
5633 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
5634 * synchronized state.
5636 if (thflags & TH_SYN) {
5637 rack_challenge_ack(m, th, tp, ti_locked, &ret_val);
5641 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
5642 * it's less than ts_recent, drop it.
5644 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
5645 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
5646 if (rack_ts_check(m, th, tp, ti_locked, tlen, thflags, &ret_val))
5649 if (rack_drop_checks(to, m, th, tp, &tlen, ti_locked, &thflags, &drop_hdrlen, &ret_val)) {
5653 * If last ACK falls within this segment's sequence numbers, record
5654 * its timestamp. NOTE: 1) That the test incorporates suggestions
5655 * from the latest proposal of the tcplw@cray.com list (Braden
5656 * 1993/04/26). 2) That updating only on newer timestamps interferes
5657 * with our earlier PAWS tests, so this check should be solely
5658 * predicated on the sequence space of this segment. 3) That we
5659 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
5660 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
5661 * SEG.Len, This modified check allows us to overcome RFC1323's
5662 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
5663 * p.869. In such cases, we can still calculate the RTT correctly
5664 * when RCV.NXT == Last.ACK.Sent.
5666 if ((to->to_flags & TOF_TS) != 0 &&
5667 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
5668 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
5669 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
5670 tp->ts_recent_age = tcp_ts_getticks();
5671 tp->ts_recent = to->to_tsval;
5674 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
5675 * is on (half-synchronized state), then queue data for later
5676 * processing; else drop segment and return.
5678 if ((thflags & TH_ACK) == 0) {
5679 if (tp->t_flags & TF_NEEDSYN) {
5681 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5682 ti_locked, tiwin, thflags, nxt_pkt));
5684 } else if (tp->t_flags & TF_ACKNOW) {
5685 rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, &ret_val);
5688 rack_do_drop(m, NULL, ti_locked);
5695 if (rack_process_ack(m, th, so, tp, to, ti_locked, tiwin, tlen, NULL, thflags, &ret_val)) {
5698 if (sbavail(&so->so_snd)) {
5699 if (rack_progress_timeout_check(tp)) {
5700 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
5701 rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
5705 /* State changes only happen in rack_process_data() */
5706 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5707 ti_locked, tiwin, thflags, nxt_pkt));
5711 * Return value of 1, the TCB is unlocked and most
5712 * likely gone, return value of 0, the TCP is still
5716 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
5717 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5718 int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5720 int32_t ret_val = 0;
5722 rack_calc_rwin(so, tp);
5723 if (thflags & TH_RST)
5724 return (rack_process_rst(m, th, so, tp, ti_locked));
5726 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
5727 * synchronized state.
5729 if (thflags & TH_SYN) {
5730 rack_challenge_ack(m, th, tp, ti_locked, &ret_val);
5734 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
5735 * it's less than ts_recent, drop it.
5737 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
5738 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
5739 if (rack_ts_check(m, th, tp, ti_locked, tlen, thflags, &ret_val))
5742 if (rack_drop_checks(to, m, th, tp, &tlen, ti_locked, &thflags, &drop_hdrlen, &ret_val)) {
5746 * If last ACK falls within this segment's sequence numbers, record
5747 * its timestamp. NOTE: 1) That the test incorporates suggestions
5748 * from the latest proposal of the tcplw@cray.com list (Braden
5749 * 1993/04/26). 2) That updating only on newer timestamps interferes
5750 * with our earlier PAWS tests, so this check should be solely
5751 * predicated on the sequence space of this segment. 3) That we
5752 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
5753 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
5754 * SEG.Len, This modified check allows us to overcome RFC1323's
5755 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
5756 * p.869. In such cases, we can still calculate the RTT correctly
5757 * when RCV.NXT == Last.ACK.Sent.
5759 if ((to->to_flags & TOF_TS) != 0 &&
5760 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
5761 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
5762 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
5763 tp->ts_recent_age = tcp_ts_getticks();
5764 tp->ts_recent = to->to_tsval;
5767 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
5768 * is on (half-synchronized state), then queue data for later
5769 * processing; else drop segment and return.
5771 if ((thflags & TH_ACK) == 0) {
5772 if (tp->t_flags & TF_NEEDSYN) {
5773 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5774 ti_locked, tiwin, thflags, nxt_pkt));
5776 } else if (tp->t_flags & TF_ACKNOW) {
5777 rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, &ret_val);
5780 rack_do_drop(m, NULL, ti_locked);
5787 if (rack_process_ack(m, th, so, tp, to, ti_locked, tiwin, tlen, NULL, thflags, &ret_val)) {
5790 if (sbavail(&so->so_snd)) {
5791 if (rack_progress_timeout_check(tp)) {
5792 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
5793 rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
5797 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5798 ti_locked, tiwin, thflags, nxt_pkt));
5802 rack_check_data_after_close(struct mbuf *m,
5803 struct tcpcb *tp, int32_t *ti_locked, int32_t *tlen, struct tcphdr *th, struct socket *so)
5805 struct tcp_rack *rack;
5807 KASSERT(*ti_locked == TI_RLOCKED, ("%s: SS_NOFDEREF && "
5808 "CLOSE_WAIT && tlen ti_locked %d", __func__, *ti_locked));
5809 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
5810 rack = (struct tcp_rack *)tp->t_fb_ptr;
5811 if (rack->rc_allow_data_af_clo == 0) {
5814 TCPSTAT_INC(tcps_rcvafterclose);
5815 rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_UNLIMITED, (*tlen));
5818 if (sbavail(&so->so_snd) == 0)
5820 /* Ok we allow data that is ignored and a follow-up reset */
5821 tp->rcv_nxt = th->th_seq + *tlen;
5822 tp->t_flags2 |= TF2_DROP_AF_DATA;
5823 rack->r_wanted_output = 1;
5829 * Return value of 1, the TCB is unlocked and most
5830 * likely gone, return value of 0, the TCP is still
5834 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
5835 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5836 int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5838 int32_t ret_val = 0;
5839 int32_t ourfinisacked = 0;
5841 rack_calc_rwin(so, tp);
5843 if (thflags & TH_RST)
5844 return (rack_process_rst(m, th, so, tp, ti_locked));
5846 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
5847 * synchronized state.
5849 if (thflags & TH_SYN) {
5850 rack_challenge_ack(m, th, tp, ti_locked, &ret_val);
5854 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
5855 * it's less than ts_recent, drop it.
5857 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
5858 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
5859 if (rack_ts_check(m, th, tp, ti_locked, tlen, thflags, &ret_val))
5862 if (rack_drop_checks(to, m, th, tp, &tlen, ti_locked, &thflags, &drop_hdrlen, &ret_val)) {
5866 * If new data are received on a connection after the user processes
5867 * are gone, then RST the other end.
5869 if ((so->so_state & SS_NOFDREF) && tlen) {
5870 if (rack_check_data_after_close(m, tp, ti_locked, &tlen, th, so))
5874 * If last ACK falls within this segment's sequence numbers, record
5875 * its timestamp. NOTE: 1) That the test incorporates suggestions
5876 * from the latest proposal of the tcplw@cray.com list (Braden
5877 * 1993/04/26). 2) That updating only on newer timestamps interferes
5878 * with our earlier PAWS tests, so this check should be solely
5879 * predicated on the sequence space of this segment. 3) That we
5880 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
5881 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
5882 * SEG.Len. This modified check allows us to overcome RFC1323's
5883 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
5884 * p.869. In such cases, we can still calculate the RTT correctly
5885 * when RCV.NXT == Last.ACK.Sent.
5887 if ((to->to_flags & TOF_TS) != 0 &&
5888 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
5889 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
5890 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
5891 tp->ts_recent_age = tcp_ts_getticks();
5892 tp->ts_recent = to->to_tsval;
5895 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
5896 * is on (half-synchronized state), then queue data for later
5897 * processing; else drop segment and return.
5899 if ((thflags & TH_ACK) == 0) {
5900 if (tp->t_flags & TF_NEEDSYN) {
5901 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5902 ti_locked, tiwin, thflags, nxt_pkt));
5903 } else if (tp->t_flags & TF_ACKNOW) {
5904 rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, &ret_val);
5907 rack_do_drop(m, NULL, ti_locked);
5914 if (rack_process_ack(m, th, so, tp, to, ti_locked, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
5917 if (ourfinisacked) {
5919 * If we can't receive any more data, then closing user can
5920 * proceed. Starting the timer is contrary to the
5921 * specification, but if we don't get a FIN we'll hang forever.
5924 * XXXjl: we should release the tp also, and use a compressed state.
5927 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5928 soisdisconnected(so);
5929 tcp_timer_activate(tp, TT_2MSL,
5930 (tcp_fast_finwait2_recycle ?
5931 tcp_finwait2_timeout : TP_MAXIDLE(tp)));
5934 tcp_state_change(tp, TCPS_FIN_WAIT_2);
5936 if (sbavail(&so->so_snd)) {
5937 if (rack_progress_timeout_check(tp)) {
5938 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
5939 rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
5943 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5944 ti_locked, tiwin, thflags, nxt_pkt));
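/*
 * A minimal sketch of the Last.ACK.Sent test the comment blocks above
 * describe: ts_recent is taken from a segment only when the last ACK we
 * sent falls within [SEG.SEQ, SEG.SEQ + SEG.LEN], with SYN/FIN each
 * occupying one sequence number. SEQ_LEQ() is the kernel's modular
 * sequence-space compare from tcp_seq.h.
 */
#if 0
static int
ts_recent_update_ok(tcp_seq seg_seq, tcp_seq last_ack_sent, int32_t tlen,
    int32_t thflags)
{
	tcp_seq seg_end;

	seg_end = seg_seq + tlen + ((thflags & (TH_SYN | TH_FIN)) != 0);
	return (SEQ_LEQ(seg_seq, last_ack_sent) &&
	    SEQ_LEQ(last_ack_sent, seg_end));
}
#endif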
5948 * Return value of 1, the TCB is unlocked and most
5949 * likely gone; return value of 0, the TCP is still locked.
5953 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
5954 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5955 int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5957 int32_t ret_val = 0;
5958 int32_t ourfinisacked = 0;
5960 rack_calc_rwin(so, tp);
5962 if (thflags & TH_RST)
5963 return (rack_process_rst(m, th, so, tp, ti_locked));
5965 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
5966 * synchronized state.
5968 if (thflags & TH_SYN) {
5969 rack_challenge_ack(m, th, tp, ti_locked, &ret_val);
5973 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
5974 * it's less than ts_recent, drop it.
5976 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
5977 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
5978 if (rack_ts_check(m, th, tp, ti_locked, tlen, thflags, &ret_val))
5981 if (rack_drop_checks(to, m, th, tp, &tlen, ti_locked, &thflags, &drop_hdrlen, &ret_val)) {
5985 * If new data are received on a connection after the user processes
5986 * are gone, then RST the other end.
5988 if ((so->so_state & SS_NOFDREF) && tlen) {
5989 if (rack_check_data_after_close(m, tp, ti_locked, &tlen, th, so))
5993 * If last ACK falls within this segment's sequence numbers, record
5994 * its timestamp. NOTE: 1) That the test incorporates suggestions
5995 * from the latest proposal of the tcplw@cray.com list (Braden
5996 * 1993/04/26). 2) That updating only on newer timestamps interferes
5997 * with our earlier PAWS tests, so this check should be solely
5998 * predicated on the sequence space of this segment. 3) That we
5999 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
6000 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
6001 * SEG.Len. This modified check allows us to overcome RFC1323's
6002 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
6003 * p.869. In such cases, we can still calculate the RTT correctly
6004 * when RCV.NXT == Last.ACK.Sent.
6006 if ((to->to_flags & TOF_TS) != 0 &&
6007 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
6008 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
6009 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
6010 tp->ts_recent_age = tcp_ts_getticks();
6011 tp->ts_recent = to->to_tsval;
6014 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
6015 * is on (half-synchronized state), then queue data for later
6016 * processing; else drop segment and return.
6018 if ((thflags & TH_ACK) == 0) {
6019 if (tp->t_flags & TF_NEEDSYN) {
6020 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6021 ti_locked, tiwin, thflags, nxt_pkt));
6022 } else if (tp->t_flags & TF_ACKNOW) {
6023 rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, &ret_val);
6026 rack_do_drop(m, NULL, ti_locked);
6033 if (rack_process_ack(m, th, so, tp, to, ti_locked, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
6036 if (ourfinisacked) {
6037 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
tcp_twstart(tp);
6039 INP_INFO_RUNLOCK(&V_tcbinfo);
6040 *ti_locked = TI_UNLOCKED;
m_freem(m);
return (1);
6044 if (sbavail(&so->so_snd)) {
6045 if (rack_progress_timeout_check(tp)) {
6046 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
6047 rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
6051 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6052 ti_locked, tiwin, thflags, nxt_pkt));
6056 * Return value of 1, the TCB is unlocked and most
6057 * likely gone; return value of 0, the TCP is still locked.
6061 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
6062 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
6063 int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
6065 int32_t ret_val = 0;
6066 int32_t ourfinisacked = 0;
6068 rack_calc_rwin(so, tp);
6070 if (thflags & TH_RST)
6071 return (rack_process_rst(m, th, so, tp, ti_locked));
6073 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
6074 * synchronized state.
6076 if (thflags & TH_SYN) {
6077 rack_challenge_ack(m, th, tp, ti_locked, &ret_val);
6081 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
6082 * it's less than ts_recent, drop it.
6084 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
6085 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
6086 if (rack_ts_check(m, th, tp, ti_locked, tlen, thflags, &ret_val))
6089 if (rack_drop_checks(to, m, th, tp, &tlen, ti_locked, &thflags, &drop_hdrlen, &ret_val)) {
6093 * If new data are received on a connection after the user processes
6094 * are gone, then RST the other end.
6096 if ((so->so_state & SS_NOFDREF) && tlen) {
6097 if (rack_check_data_after_close(m, tp, ti_locked, &tlen, th, so))
6101 * If last ACK falls within this segment's sequence numbers, record
6102 * its timestamp. NOTE: 1) That the test incorporates suggestions
6103 * from the latest proposal of the tcplw@cray.com list (Braden
6104 * 1993/04/26). 2) That updating only on newer timestamps interferes
6105 * with our earlier PAWS tests, so this check should be solely
6106 * predicated on the sequence space of this segment. 3) That we
6107 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
6108 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
6109 * SEG.Len. This modified check allows us to overcome RFC1323's
6110 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
6111 * p.869. In such cases, we can still calculate the RTT correctly
6112 * when RCV.NXT == Last.ACK.Sent.
6114 if ((to->to_flags & TOF_TS) != 0 &&
6115 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
6116 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
6117 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
6118 tp->ts_recent_age = tcp_ts_getticks();
6119 tp->ts_recent = to->to_tsval;
6122 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
6123 * is on (half-synchronized state), then queue data for later
6124 * processing; else drop segment and return.
6126 if ((thflags & TH_ACK) == 0) {
6127 if (tp->t_flags & TF_NEEDSYN) {
6128 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6129 ti_locked, tiwin, thflags, nxt_pkt));
6130 } else if (tp->t_flags & TF_ACKNOW) {
6131 rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, &ret_val);
6134 rack_do_drop(m, NULL, ti_locked);
6139 * case TCPS_LAST_ACK: Ack processing.
6141 if (rack_process_ack(m, th, so, tp, to, ti_locked, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
6144 if (ourfinisacked) {
6145 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
tp = tcp_close(tp);
6147 rack_do_drop(m, tp, ti_locked);
return (1);
6150 if (sbavail(&so->so_snd)) {
6151 if (rack_progress_timeout_check(tp)) {
6152 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
6153 rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
6157 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6158 ti_locked, tiwin, thflags, nxt_pkt));
6163 * Return value of 1, the TCB is unlocked and most
6164 * likely gone; return value of 0, the TCP is still locked.
6168 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
6169 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
6170 int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
6172 int32_t ret_val = 0;
6173 int32_t ourfinisacked = 0;
6175 rack_calc_rwin(so, tp);
6177 /* Reset receive buffer auto scaling when not in bulk receive mode. */
6178 if (thflags & TH_RST)
6179 return (rack_process_rst(m, th, so, tp, ti_locked));
6181 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
6182 * synchronized state.
6184 if (thflags & TH_SYN) {
6185 rack_challenge_ack(m, th, tp, ti_locked, &ret_val);
6189 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
6190 * it's less than ts_recent, drop it.
6192 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
6193 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
6194 if (rack_ts_check(m, th, tp, ti_locked, tlen, thflags, &ret_val))
6197 if (rack_drop_checks(to, m, th, tp, &tlen, ti_locked, &thflags, &drop_hdrlen, &ret_val)) {
6201 * If new data are received on a connection after the user processes
6202 * are gone, then RST the other end.
6204 if ((so->so_state & SS_NOFDREF) &&
6206 if (rack_check_data_after_close(m, tp, ti_locked, &tlen, th, so))
6210 * If last ACK falls within this segment's sequence numbers, record
6211 * its timestamp. NOTE: 1) That the test incorporates suggestions
6212 * from the latest proposal of the tcplw@cray.com list (Braden
6213 * 1993/04/26). 2) That updating only on newer timestamps interferes
6214 * with our earlier PAWS tests, so this check should be solely
6215 * predicated on the sequence space of this segment. 3) That we
6216 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
6217 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
6218 * SEG.Len. This modified check allows us to overcome RFC1323's
6219 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
6220 * p.869. In such cases, we can still calculate the RTT correctly
6221 * when RCV.NXT == Last.ACK.Sent.
6223 if ((to->to_flags & TOF_TS) != 0 &&
6224 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
6225 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
6226 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
6227 tp->ts_recent_age = tcp_ts_getticks();
6228 tp->ts_recent = to->to_tsval;
6231 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
6232 * is on (half-synchronized state), then queue data for later
6233 * processing; else drop segment and return.
6235 if ((thflags & TH_ACK) == 0) {
6236 if (tp->t_flags & TF_NEEDSYN) {
6237 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6238 ti_locked, tiwin, thflags, nxt_pkt));
6239 } else if (tp->t_flags & TF_ACKNOW) {
6240 rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, &ret_val);
6243 rack_do_drop(m, NULL, ti_locked);
6250 if (rack_process_ack(m, th, so, tp, to, ti_locked, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
6253 if (sbavail(&so->so_snd)) {
6254 if (rack_progress_timeout_check(tp)) {
6255 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
6256 rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
6260 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6261 ti_locked, tiwin, thflags, nxt_pkt));
6266 rack_clear_rate_sample(struct tcp_rack *rack)
6268 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY;
6269 rack->r_ctl.rack_rs.rs_rtt_cnt = 0;
6270 rack->r_ctl.rack_rs.rs_rtt_tot = 0;
6274 rack_init(struct tcpcb *tp)
6276 struct tcp_rack *rack = NULL;
6278 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT);
6279 if (tp->t_fb_ptr == NULL) {
6281 * We need to allocate memory but can't. The INP and INP_INFO
6282 * locks are held and they are recursive (this happens during
6283 * setup), so a scheme to drop the locks fails :(
return (ENOMEM);
}
6288 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack));
6290 rack = (struct tcp_rack *)tp->t_fb_ptr;
6291 TAILQ_INIT(&rack->r_ctl.rc_map);
6292 TAILQ_INIT(&rack->r_ctl.rc_free);
6293 TAILQ_INIT(&rack->r_ctl.rc_tmap);
6296 rack->rc_inp = tp->t_inpcb;
6298 /* Probably not needed but let's be sure */
6299 rack_clear_rate_sample(rack);
6301 rack->r_ctl.rc_reorder_fade = rack_reorder_fade;
6302 rack->rc_allow_data_af_clo = rack_ignore_data_after_close;
6303 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh;
6304 rack->rc_pace_reduce = rack_slot_reduction;
6305 if (V_tcp_delack_enabled)
6306 tp->t_delayed_ack = 1;
6308 tp->t_delayed_ack = 0;
6309 rack->rc_pace_max_segs = rack_hptsi_segments;
6310 rack->r_ctl.rc_early_recovery_segs = rack_early_recovery_max_seg;
6311 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh;
6312 rack->r_ctl.rc_pkt_delay = rack_pkt_delay;
6313 rack->r_ctl.rc_prop_reduce = rack_use_proportional_reduce;
6314 rack->r_idle_reduce_largest = rack_reduce_largest_on_idle;
6315 rack->r_enforce_min_pace = rack_min_pace_time;
6316 rack->r_min_pace_seg_thresh = rack_min_pace_time_seg_req;
6317 rack->r_ctl.rc_prop_rate = rack_proportional_rate;
6318 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp;
6319 rack->r_ctl.rc_early_recovery = rack_early_recovery;
6320 rack->rc_always_pace = rack_pace_every_seg;
6321 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method;
6322 rack->rack_tlp_threshold_use = rack_tlp_threshold_use;
6323 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr;
6324 rack->r_ctl.rc_min_to = rack_min_to;
6325 rack->r_ctl.rc_prr_inc_var = rack_inc_var;
6326 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), __LINE__, 0, 0, 0);
6327 if (tp->snd_una != tp->snd_max) {
6328 /* Create a send map for the current outstanding data */
6329 struct rack_sendmap *rsm;
6331 rsm = rack_alloc(rack);
if (rsm == NULL) {
6333 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
6334 tp->t_fb_ptr = NULL;
return (ENOMEM);
}
6337 rsm->r_flags = RACK_OVERMAX;
6338 rsm->r_tim_lastsent[0] = tcp_ts_getticks();
6340 rsm->r_rtr_bytes = 0;
6341 rsm->r_start = tp->snd_una;
6342 rsm->r_end = tp->snd_max;
6344 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_map, rsm, r_next);
6345 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
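/*
 * The tail of rack_init() above seeds the sendmap when the connection
 * is handed over with data already in flight (snd_una != snd_max): one
 * entry spans the whole outstanding range and is stamped with the
 * current tick so the RACK/TLP timers have a send time to measure
 * against. A sketch of that shape (field assignments mirror the code
 * above; the helper name is hypothetical):
 */
#if 0
static void
seed_outstanding_sendmap(struct rack_sendmap *rsm, struct tcpcb *tp)
{
	rsm->r_flags = RACK_OVERMAX;
	rsm->r_tim_lastsent[0] = tcp_ts_getticks();	/* treated as sent "now" */
	rsm->r_rtr_bytes = 0;				/* nothing retransmitted yet */
	rsm->r_start = tp->snd_una;			/* oldest unacked byte */
	rsm->r_end = tp->snd_max;			/* one past highest sent byte */
}
#endif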
6352 rack_handoff_ok(struct tcpcb *tp)
6354 if ((tp->t_state == TCPS_CLOSED) ||
6355 (tp->t_state == TCPS_LISTEN)) {
6356 /* Sure no problem though it may not stick */
6359 if ((tp->t_state == TCPS_SYN_SENT) ||
6360 (tp->t_state == TCPS_SYN_RECEIVED)) {
6362 * We really don't know; you have to get to ESTAB or beyond to tell.
6367 if (tp->t_flags & TF_SACK_PERMIT) {
6371 * If we reach here we don't do SACK on this connection so we can never do rack.
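/*
 * rack_handoff_ok() boils down to a small predicate: adoption is fine
 * while CLOSED/LISTEN, deferred during the SYN states (the SACK outcome
 * is unknown), and otherwise requires negotiated SACK. A standalone
 * restatement (returning 1 for "ok" here, where the function above
 * returns 0 or an errno):
 */
#if 0
static int
handoff_ok_sketch(int t_state, int sack_permit)
{
	if (t_state == TCPS_CLOSED || t_state == TCPS_LISTEN)
		return (1);	/* nothing in flight yet */
	if (t_state == TCPS_SYN_SENT || t_state == TCPS_SYN_RECEIVED)
		return (0);	/* must reach ESTABLISHED first */
	return (sack_permit != 0);	/* RACK requires SACK */
}
#endif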
6378 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
6381 struct tcp_rack *rack;
6382 struct rack_sendmap *rsm;
6384 rack = (struct tcp_rack *)tp->t_fb_ptr;
6386 tcp_log_flowend(tp);
6388 rsm = TAILQ_FIRST(&rack->r_ctl.rc_map);
6390 TAILQ_REMOVE(&rack->r_ctl.rc_map, rsm, r_next);
6391 uma_zfree(rack_zone, rsm);
6392 rsm = TAILQ_FIRST(&rack->r_ctl.rc_map);
6394 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
6396 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_next);
6397 uma_zfree(rack_zone, rsm);
6398 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
6400 rack->rc_free_cnt = 0;
6401 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
6402 tp->t_fb_ptr = NULL;
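/*
 * rack_fini() drains rc_map and rc_free with the take-first / free /
 * re-read pattern above. TAILQ_FOREACH_SAFE from sys/queue.h is the
 * equivalent single-loop idiom; a sketch for one of the queues:
 */
#if 0
static void
drain_rc_map(struct tcp_rack *rack)
{
	struct rack_sendmap *rsm, *nrsm;

	TAILQ_FOREACH_SAFE(rsm, &rack->r_ctl.rc_map, r_next, nrsm) {
		TAILQ_REMOVE(&rack->r_ctl.rc_map, rsm, r_next);
		uma_zfree(rack_zone, rsm);
	}
}
#endif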
6407 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack)
6409 switch (tp->t_state) {
6411 rack->r_state = TCPS_SYN_SENT;
6412 rack->r_substate = rack_do_syn_sent;
6414 case TCPS_SYN_RECEIVED:
6415 rack->r_state = TCPS_SYN_RECEIVED;
6416 rack->r_substate = rack_do_syn_recv;
6418 case TCPS_ESTABLISHED:
6419 rack->r_state = TCPS_ESTABLISHED;
6420 rack->r_substate = rack_do_established;
6422 case TCPS_CLOSE_WAIT:
6423 rack->r_state = TCPS_CLOSE_WAIT;
6424 rack->r_substate = rack_do_close_wait;
6426 case TCPS_FIN_WAIT_1:
6427 rack->r_state = TCPS_FIN_WAIT_1;
6428 rack->r_substate = rack_do_fin_wait_1;
6431 rack->r_state = TCPS_CLOSING;
6432 rack->r_substate = rack_do_closing;
6435 rack->r_state = TCPS_LAST_ACK;
6436 rack->r_substate = rack_do_lastack;
6438 case TCPS_FIN_WAIT_2:
6439 rack->r_state = TCPS_FIN_WAIT_2;
6440 rack->r_substate = rack_do_fin_wait_2;
6444 case TCPS_TIME_WAIT:
6447 panic("tcp tp:%p state:%d sees impossible state?", tp, tp->t_state);
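/*
 * rack_set_state() pins r_substate to one handler per state. The same
 * mapping can be written as a designated-initializer table indexed by
 * t_state; a sketch (table and typedef names hypothetical, TIME_WAIT
 * and CLOSED left NULL just as they are unreachable above):
 */
#if 0
typedef int32_t (*rack_substate_t)(struct mbuf *, struct tcphdr *,
    struct socket *, struct tcpcb *, struct tcpopt *, int32_t, int32_t,
    int32_t *, uint32_t, int32_t, int32_t);

static const rack_substate_t rack_substates[TCP_NSTATES] = {
	[TCPS_SYN_SENT]		= rack_do_syn_sent,
	[TCPS_SYN_RECEIVED]	= rack_do_syn_recv,
	[TCPS_ESTABLISHED]	= rack_do_established,
	[TCPS_CLOSE_WAIT]	= rack_do_close_wait,
	[TCPS_FIN_WAIT_1]	= rack_do_fin_wait_1,
	[TCPS_CLOSING]		= rack_do_closing,
	[TCPS_LAST_ACK]		= rack_do_lastack,
	[TCPS_FIN_WAIT_2]	= rack_do_fin_wait_2,
};
#endif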
6455 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
6458 * We received an ack, and then did not
6459 * call send or were bounced out because the
6460 * hpts was running. Now a timer is up as well, is
6461 * it the right timer?
6463 struct rack_sendmap *rsm;
6466 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
6467 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT))
6469 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6470 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) &&
6471 (tmr_up == PACE_TMR_RXT)) {
6472 /* Should be an RXT */
6476 /* Nothing outstanding? */
6477 if (tp->t_flags & TF_DELACK) {
6478 if (tmr_up == PACE_TMR_DELACK)
6479 /* We are supposed to have delayed ack up and we do */
6481 } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) {
6483 * if we hit enobufs then we would expect the possibility
6484 * of nothing outstanding and the RXT up (and the hptsi timer).
6487 } else if (((tcp_always_keepalive ||
6488 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
6489 (tp->t_state <= TCPS_CLOSING)) &&
6490 (tmr_up == PACE_TMR_KEEP) &&
6491 (tp->snd_max == tp->snd_una)) {
6492 /* We should have keep alive up and we do */
6496 if (rsm && (rsm->r_flags & RACK_SACK_PASSED)) {
6497 if ((tp->t_flags & TF_SENTFIN) &&
6498 ((tp->snd_max - tp->snd_una) == 1) &&
6499 (rsm->r_flags & RACK_HAS_FIN)) {
6500 /* needs to be a RXT */
6501 if (tmr_up == PACE_TMR_RXT)
6503 } else if (tmr_up == PACE_TMR_RACK)
6505 } else if (SEQ_GT(tp->snd_max, tp->snd_una) &&
6506 ((tmr_up == PACE_TMR_TLP) ||
6507 (tmr_up == PACE_TMR_RXT))) {
6509 * Either a TLP or RXT is fine if no sack-passed
6510 * is in place and data is outstanding.
6513 } else if (tmr_up == PACE_TMR_DELACK) {
6515 * If the delayed ack was going to go off
6516 * before the rtx/tlp/rack timer were going to
6517 * expire, then that would be the timer in control.
6518 * Note we don't check the time here, trusting the code is correct.
6524 * Ok the timer originally started is not what we want now.
6525 * We will force the hpts to be stopped if any, and restart
6526 * with the slot set to what was in the saved slot.
6528 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
6529 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), __LINE__, 0, 0, 0);
6533 rack_hpts_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
6534 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos,
6535 int32_t ti_locked, int32_t nxt_pkt, struct timeval *tv)
6537 int32_t thflags, retval, did_out = 0;
6538 int32_t way_out = 0;
6542 struct tcp_rack *rack;
6543 struct rack_sendmap *rsm;
6544 int32_t prev_state = 0;
6546 cts = tcp_tv_to_mssectick(tv);
6547 rack = (struct tcp_rack *)tp->t_fb_ptr;
6549 kern_prefetch(rack, &prev_state);
6551 thflags = th->th_flags;
6553 * If this is either a state-changing packet or current state isn't
6554 * established, we require a read lock on tcbinfo. Otherwise, we
6555 * allow the tcbinfo to be in either the locked or unlocked state, as the
6556 * caller may have unnecessarily acquired a lock due to a race.
6558 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
6559 tp->t_state != TCPS_ESTABLISHED) {
6560 KASSERT(ti_locked == TI_RLOCKED, ("%s ti_locked %d for "
6561 "SYN/FIN/RST/!EST", __func__, ti_locked));
6562 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
6565 if (ti_locked == TI_RLOCKED) {
6566 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
6568 KASSERT(ti_locked == TI_UNLOCKED, ("%s: EST "
6569 "ti_locked: %d", __func__, ti_locked));
6570 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
6574 INP_WLOCK_ASSERT(tp->t_inpcb);
6575 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
6577 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
6580 union tcp_log_stackspecific log;
6582 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
6583 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
6584 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
6585 TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
6589 * Segment received on connection. Reset idle time and keep-alive
6590 * timer. XXX: This should be done after segment validation to
6591 * ignore broken/spoofed segs.
6593 if (tp->t_idle_reduce && (tp->snd_max == tp->snd_una)) {
6595 if ((tp->cwv_enabled) &&
6596 ((tp->cwv_cwnd_valid == 0) &&
6597 TCPS_HAVEESTABLISHED(tp->t_state) &&
6598 (tp->snd_cwnd > tp->snd_cwv.init_cwnd))) {
6599 tcp_newcwv_nvp_closedown(tp);
6602 if ((ticks - tp->t_rcvtime) >= tp->t_rxtcur) {
6603 counter_u64_add(rack_input_idle_reduces, 1);
6604 rack_cc_after_idle(tp,
6605 (rack->r_idle_reduce_largest ? 1 :0));
6608 rack->r_ctl.rc_rcvtime = cts;
6609 tp->t_rcvtime = ticks;
6612 if (tp->cwv_enabled) {
6613 if ((tp->cwv_cwnd_valid == 0) &&
6614 TCPS_HAVEESTABLISHED(tp->t_state) &&
6615 (tp->snd_cwnd > tp->snd_cwv.init_cwnd))
6616 tcp_newcwv_nvp_closedown(tp);
6620 * Unscale the window into a 32-bit value. For the SYN_SENT state
6621 * the scale is zero.
6623 tiwin = th->th_win << tp->snd_scale;
6624 #ifdef NETFLIX_STATS
6625 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
6628 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move
6629 * this to occur after we've validated the segment.
6631 if (tp->t_flags & TF_ECN_PERMIT) {
6632 if (thflags & TH_CWR)
6633 tp->t_flags &= ~TF_ECN_SND_ECE;
6634 switch (iptos & IPTOS_ECN_MASK) {
6636 tp->t_flags |= TF_ECN_SND_ECE;
6637 TCPSTAT_INC(tcps_ecn_ce);
6639 case IPTOS_ECN_ECT0:
6640 TCPSTAT_INC(tcps_ecn_ect0);
6642 case IPTOS_ECN_ECT1:
6643 TCPSTAT_INC(tcps_ecn_ect1);
6646 /* Congestion experienced. */
6647 if (thflags & TH_ECE) {
6648 rack_cong_signal(tp, th, CC_ECN);
6652 * Parse options on any incoming segment.
6654 tcp_dooptions(&to, (u_char *)(th + 1),
6655 (th->th_off << 2) - sizeof(struct tcphdr),
6656 (thflags & TH_SYN) ? TO_SYN : 0);
6659 * If echoed timestamp is later than the current time, fall back to
6660 * non RFC1323 RTT calculation. Normalize timestamp if syncookies
6661 * were used when this connection was established.
6663 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
6664 to.to_tsecr -= tp->ts_offset;
6665 if (TSTMP_GT(to.to_tsecr, cts))
6669 * If it's the first time in, we need to take care of options and
6670 * verify we can do SACK for rack!
6672 if (rack->r_state == 0) {
6673 /* Should be init'd by rack_init() */
6674 KASSERT(rack->rc_inp != NULL,
6675 ("%s: rack->rc_inp unexpectedly NULL", __func__));
6676 if (rack->rc_inp == NULL) {
6677 rack->rc_inp = tp->t_inpcb;
6681 * Process options only when we get SYN/ACK back. The SYN
6682 * case for incoming connections is handled in tcp_syncache.
6683 * According to RFC1323 the window field in a SYN (i.e., a
6684 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX
6685 * this is traditional behavior, may need to be cleaned up.
6687 rack->r_cpu = inp_to_cpuid(tp->t_inpcb);
6688 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
6689 if ((to.to_flags & TOF_SCALE) &&
6690 (tp->t_flags & TF_REQ_SCALE)) {
6691 tp->t_flags |= TF_RCVD_SCALE;
6692 tp->snd_scale = to.to_wscale;
6695 * Initial send window. It will be updated with the
6696 * next incoming segment to the scaled value.
6698 tp->snd_wnd = th->th_win;
6699 if (to.to_flags & TOF_TS) {
6700 tp->t_flags |= TF_RCVD_TSTMP;
6701 tp->ts_recent = to.to_tsval;
6702 tp->ts_recent_age = cts;
6704 if (to.to_flags & TOF_MSS)
6705 tcp_mss(tp, to.to_mss);
6706 if ((tp->t_flags & TF_SACK_PERMIT) &&
6707 (to.to_flags & TOF_SACKPERM) == 0)
6708 tp->t_flags &= ~TF_SACK_PERMIT;
6711 * At this point we are at the initial call. Here we decide
6712 * if we are doing RACK or not. We do this by seeing if
6713 * TF_SACK_PERMIT is set; if not, rack is *not* possible and
6714 * we switch to the default code.
6716 if ((tp->t_flags & TF_SACK_PERMIT) == 0) {
6717 tcp_switch_back_to_default(tp);
6718 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen,
6719 tlen, iptos, ti_locked);
6723 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
6724 tcp_set_hpts(tp->t_inpcb);
6725 rack_stop_all_timers(tp);
6726 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack);
6729 * This is the one exception case where we set the rack state
6730 * always. All other times (timers etc) we must have a rack-state
6731 * set (so we assure we have done the checks above for SACK).
6733 if (rack->r_state != tp->t_state)
6734 rack_set_state(tp, rack);
6735 if (SEQ_GT(th->th_ack, tp->snd_una) && (rsm = TAILQ_FIRST(&rack->r_ctl.rc_map)) != NULL)
6736 kern_prefetch(rsm, &prev_state);
6737 prev_state = rack->r_state;
6738 rack->r_ctl.rc_tlp_send_cnt = 0;
6739 rack_clear_rate_sample(rack);
6740 retval = (*rack->r_substate) (m, th, so,
6741 tp, &to, drop_hdrlen,
6742 tlen, &ti_locked, tiwin, thflags, nxt_pkt);
6744 if ((retval == 0) &&
6745 (tp->t_inpcb == NULL)) {
6746 panic("retval:%d tp:%p t_inpcb:NULL state:%d",
6747 retval, tp, prev_state);
6750 if (ti_locked != TI_UNLOCKED) {
6751 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
6752 INP_INFO_RUNLOCK(&V_tcbinfo);
6753 ti_locked = TI_UNLOCKED;
6757 * If retval is 1 the tcb is unlocked and most likely the tp is gone.
6760 INP_WLOCK_ASSERT(tp->t_inpcb);
6761 tcp_rack_xmit_timer_commit(rack, tp);
6762 if (((tp->snd_max - tp->snd_una) > tp->snd_wnd) &&
6763 (rack->rc_in_persist == 0)){
6765 * The peer shrunk its window on us to the point
6766 * where we have sent too much. The only thing
6767 * we can do here is stop any timers and
6768 * enter persist. We most likely lost the last
6769 * bytes we sent but oh well, we will have to
6770 * retransmit them after the peer is caught up.
6772 if (rack->rc_inp->inp_in_hpts)
6773 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
6774 rack_timer_cancel(tp, rack, cts, __LINE__);
6775 rack_enter_persist(tp, rack, cts);
6776 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), __LINE__, 0, 0, 0);
6778 goto done_with_input;
6781 if (rack->r_wanted_output != 0) {
6783 (void)tp->t_fb->tfb_tcp_output(tp);
6785 rack_start_hpts_timer(rack, tp, cts, __LINE__, 0, 0, 0);
6787 if (((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
6788 (SEQ_GT(tp->snd_max, tp->snd_una) ||
6789 (tp->t_flags & TF_DELACK) ||
6790 ((tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
6791 (tp->t_state <= TCPS_CLOSING)))) {
6792 /* We could not send (probably in the hpts but stopped the timer earlier)? */
6793 if ((tp->snd_max == tp->snd_una) &&
6794 ((tp->t_flags & TF_DELACK) == 0) &&
6795 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
6796 /* keep-alive not needed if we are still doing hptsi output */
6799 if (rack->rc_inp->inp_in_hpts)
6800 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
6801 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), __LINE__, 0, 0, 0);
6805 /* Do we have the correct timer running? */
6806 rack_timer_audit(tp, rack, &so->so_snd);
6810 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out);
6812 rack->r_wanted_output = 0;
6814 if (tp->t_inpcb == NULL) {
6815 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d",
6817 retval, tp, prev_state);
6820 INP_WUNLOCK(tp->t_inpcb);
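/*
 * Worked example of the window unscaling done in rack_hpts_do_segment()
 * ("tiwin = th->th_win << tp->snd_scale"): a raw 16-bit window of 8192
 * with a negotiated shift of 7 yields an effective peer window of
 * 8192 << 7 = 1048576 bytes. In SYN_SENT the shift is zero by
 * definition (RFC 1323), so the raw value is used as-is.
 */
#if 0
static uint32_t
unscale_window(uint16_t th_win, uint8_t snd_scale)
{
	return ((uint32_t)th_win << snd_scale);	/* 8192 << 7 == 1048576 */
}
#endif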
6825 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
6826 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos,
6831 struct tcp_function_block *tfb;
6832 struct tcp_rack *rack;
6835 rack = (struct tcp_rack *)tp->t_fb_ptr;
6836 if (rack->r_state == 0) {
6838 * Initial input (ACK to SYN-ACK etc.); let's go ahead and get the tcbinfo read lock.
6841 if (ti_locked != TI_RLOCKED && INP_INFO_TRY_RLOCK(&V_tcbinfo))
6842 ti_locked = TI_RLOCKED;
6843 if (ti_locked != TI_RLOCKED) {
6848 INP_INFO_RLOCK(&V_tcbinfo);
6849 ti_locked = TI_RLOCKED;
6851 if (in_pcbrele_wlocked(inp))
6853 if (inp == NULL || (inp->inp_flags2 & INP_FREED) ||
6854 (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED))) {
6855 /* The TCPCB went away. Free the packet. */
6856 INP_INFO_RUNLOCK(&V_tcbinfo);
6862 /* If the stack changed, call the correct stack. */
6863 if (tp->t_fb != tfb) {
6864 tp->t_fb->tfb_tcp_do_segment(m, th, so, tp,
6865 drop_hdrlen, tlen, iptos, ti_locked);
6870 rack_hpts_do_segment(m, th, so, tp, drop_hdrlen,
6871 tlen, iptos, ti_locked, 0, &tv);
6874 if (ti_locked == TI_RLOCKED)
6875 INP_INFO_RUNLOCK(&V_tcbinfo);
6876 tcp_queue_to_input(tp, m, th, tlen, drop_hdrlen, iptos, (uint8_t) ti_locked);
6877 INP_WUNLOCK(tp->t_inpcb);
6880 rack_hpts_do_segment(m, th, so, tp, drop_hdrlen,
6881 tlen, iptos, ti_locked, 0, &tv);
6885 struct rack_sendmap *
6886 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused)
6888 struct rack_sendmap *rsm = NULL;
6890 uint32_t srtt_cur, srtt = 0, thresh = 0, ts_low = 0;
6892 /* Return the next guy to be re-transmitted */
6893 if (TAILQ_EMPTY(&rack->r_ctl.rc_map)) {
6896 if (tp->t_flags & TF_SENTFIN) {
6897 /* retran the end FIN? */
6900 /* ok let's look at this one */
6901 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6902 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) {
6905 rsm = rack_find_lowest_rsm(rack);
6910 srtt_cur = tp->t_srtt >> TCP_RTT_SHIFT;
6911 srtt = TICKS_2_MSEC(srtt_cur);
6912 if (rack->rc_rack_rtt && (srtt > rack->rc_rack_rtt))
6913 srtt = rack->rc_rack_rtt;
6914 if (rsm->r_flags & RACK_ACKED) {
6917 if ((rsm->r_flags & RACK_SACK_PASSED) == 0) {
6918 /* It's not yet ready */
6921 idx = rsm->r_rtr_cnt - 1;
6922 ts_low = rsm->r_tim_lastsent[idx];
6923 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
6924 if (tsused <= ts_low) {
6927 if ((tsused - ts_low) >= thresh) {
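/*
 * The tail of tcp_rack_output() above is the core RACK readiness test:
 * a segment is eligible for retransmit once a SACK for later data has
 * passed over it (RACK_SACK_PASSED) and at least "thresh" ms have
 * elapsed since its last (re)transmission. Standalone sketch:
 */
#if 0
static int
rack_retran_ready(uint32_t tsused, uint32_t ts_low, uint32_t thresh,
    uint32_t r_flags)
{
	if (r_flags & RACK_ACKED)
		return (0);	/* already acked, nothing to resend */
	if ((r_flags & RACK_SACK_PASSED) == 0)
		return (0);	/* no later data SACKed yet */
	if (tsused <= ts_low)
		return (0);	/* no time has elapsed */
	return ((tsused - ts_low) >= thresh);
}
#endif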
6934 rack_output(struct tcpcb *tp)
6937 uint32_t recwin, sendwin;
6939 int32_t len, flags, error = 0;
6942 uint32_t if_hw_tsomaxsegcount = 0;
6943 uint32_t if_hw_tsomaxsegsize;
6944 long tot_len_this_send = 0;
6945 struct ip *ip = NULL;
6947 struct ipovly *ipov = NULL;
6949 struct udphdr *udp = NULL;
6950 struct tcp_rack *rack;
6953 uint8_t wanted_cookie = 0;
6954 u_char opt[TCP_MAXOLEN];
6955 unsigned ipoptlen, optlen, hdrlen, ulen=0;
6958 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
6959 unsigned ipsec_optlen = 0;
6962 int32_t idle, sendalot;
6963 int32_t sub_from_prr = 0;
6964 volatile int32_t sack_rxmit;
6965 struct rack_sendmap *rsm = NULL;
6966 int32_t tso, mtu, would_have_fin = 0;
6970 uint8_t hpts_calling, doing_tlp = 0;
6971 int32_t do_a_prefetch;
6972 int32_t prefetch_rsm = 0;
6973 int32_t prefetch_so_done = 0;
6974 struct tcp_log_buffer *lgb = NULL;
6978 struct ip6_hdr *ip6 = NULL;
6981 /* setup and take the cache hits here */
6982 rack = (struct tcp_rack *)tp->t_fb_ptr;
6984 so = inp->inp_socket;
6986 kern_prefetch(sb, &do_a_prefetch);
6989 INP_WLOCK_ASSERT(inp);
6991 if (tp->t_flags & TF_TOE)
6992 return (tcp_offload_output(tp));
6996 * For TFO connections in SYN_RECEIVED, only allow the initial
6997 * SYN|ACK and those sent by the retransmit timer.
6999 if (IS_FASTOPEN(tp->t_flags) &&
7000 (tp->t_state == TCPS_SYN_RECEIVED) &&
7001 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */
7002 (rack->r_ctl.rc_resend == NULL)) /* not a retransmit */
7005 if (rack->r_state) {
7006 /* Use the cache line loaded if possible */
7007 isipv6 = rack->r_is_v6;
7009 isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
7012 cts = tcp_ts_getticks();
7013 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
7016 * We are on the hpts for some timer but not hptsi output.
7017 * Remove from the hpts unconditionally.
7019 rack_timer_cancel(tp, rack, cts, __LINE__);
7021 /* Mark that we have called rack_output(). */
7022 if ((rack->r_timer_override) ||
7023 (tp->t_flags & TF_FORCEDATA) ||
7024 (tp->t_state < TCPS_ESTABLISHED)) {
7025 if (tp->t_inpcb->inp_in_hpts)
7026 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT);
7027 } else if (tp->t_inpcb->inp_in_hpts) {
7029 * On the hpts you can't pass even if ACKNOW is on; we will
7030 * send when the hpts fires.
7032 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1);
7035 hpts_calling = inp->inp_hpts_calls;
7036 inp->inp_hpts_calls = 0;
7037 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
7038 if (rack_process_timers(tp, rack, cts, hpts_calling)) {
7039 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1);
7043 rack->r_wanted_output = 0;
7044 rack->r_timer_override = 0;
7046 * Determine length of data that should be transmitted, and flags
7047 * that will be used. If there is some data or critical controls
7048 * (SYN, RST) to send, then transmit; otherwise, investigate further.
7051 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
7053 if (tp->cwv_enabled) {
7054 if ((tp->cwv_cwnd_valid == 0) &&
7055 TCPS_HAVEESTABLISHED(tp->t_state) &&
7056 (tp->snd_cwnd > tp->snd_cwv.init_cwnd))
7057 tcp_newcwv_nvp_closedown(tp);
7060 if (tp->t_idle_reduce) {
7061 if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur))
7062 rack_cc_after_idle(tp,
7063 (rack->r_idle_reduce_largest ? 1 :0));
7065 tp->t_flags &= ~TF_LASTIDLE;
7067 if (tp->t_flags & TF_MORETOCOME) {
7068 tp->t_flags |= TF_LASTIDLE;
7074 * If we've recently taken a timeout, snd_max will be greater than
7075 * snd_nxt. There may be SACK information that allows us to avoid
7076 * resending already delivered data. Adjust snd_nxt accordingly.
7079 cts = tcp_ts_getticks();
7082 sb_offset = tp->snd_max - tp->snd_una;
7083 sendwin = min(tp->snd_wnd, tp->snd_cwnd);
7085 flags = tcp_outflags[tp->t_state];
7087 * Send any SACK-generated retransmissions. If we're explicitly
7088 * trying to send out new data (when sendalot is 1), bypass this
7089 * function. If we retransmit in fast recovery mode, decrement
7090 * snd_cwnd, since we're replacing a (future) new transmission with
7091 * a retransmission now, and we previously incremented snd_cwnd in tcp_output().
7095 * Still in sack recovery, reset rxmit flag to zero.
7097 while (rack->rc_free_cnt < rack_free_cache) {
7098 rsm = rack_alloc(rack);
7100 if (inp->inp_hpts_calls)
7103 goto just_return_nolock;
7105 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_next);
7106 rack->rc_free_cnt++;
7109 if (inp->inp_hpts_calls)
7110 inp->inp_hpts_calls = 0;
7114 if (flags & TH_RST) {
7118 if (rack->r_ctl.rc_tlpsend) {
7119 /* Tail loss probe */
7124 rsm = rack->r_ctl.rc_tlpsend;
7125 rack->r_ctl.rc_tlpsend = NULL;
7127 tlen = rsm->r_end - rsm->r_start;
7128 if (tlen > tp->t_maxseg)
7129 tlen = tp->t_maxseg;
7131 if (SEQ_GT(tp->snd_una, rsm->r_start)) {
7132 panic("tp:%p rack:%p snd_una:%u rsm:%p r_start:%u",
7133 tp, rack, tp->snd_una, rsm, rsm->r_start);
7136 sb_offset = rsm->r_start - tp->snd_una;
7137 cwin = min(tp->snd_wnd, tlen);
7139 } else if (rack->r_ctl.rc_resend) {
7140 /* Retransmit timer */
7141 rsm = rack->r_ctl.rc_resend;
7142 rack->r_ctl.rc_resend = NULL;
7143 len = rsm->r_end - rsm->r_start;
7146 sb_offset = rsm->r_start - tp->snd_una;
7147 if (len >= tp->t_maxseg) {
7150 KASSERT(sb_offset >= 0, ("%s: sack block to the left of una : %d",
7151 __func__, sb_offset));
7152 } else if ((rack->rc_in_persist == 0) &&
7153 ((rsm = tcp_rack_output(tp, rack, cts)) != NULL)) {
7156 if ((!IN_RECOVERY(tp->t_flags)) &&
7157 ((tp->t_flags & (TF_WASFRECOVERY | TF_WASCRECOVERY)) == 0)) {
7158 /* Enter recovery if not induced by a time-out */
7159 rack->r_ctl.rc_rsm_start = rsm->r_start;
7160 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
7161 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
7162 rack_cong_signal(tp, NULL, CC_NDUPACK);
7164 * When we enter recovery we need to assure we send one packet.
7167 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
7170 if (SEQ_LT(rsm->r_start, tp->snd_una)) {
7171 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
7172 tp, rack, rsm, rsm->r_start, tp->snd_una);
7175 tlen = rsm->r_end - rsm->r_start;
7176 sb_offset = rsm->r_start - tp->snd_una;
7177 if (tlen > rack->r_ctl.rc_prr_sndcnt) {
7178 len = rack->r_ctl.rc_prr_sndcnt;
7182 if (len >= tp->t_maxseg) {
7187 if ((rack->rc_timer_up == 0) &&
7190 * If it's not a timer, don't send a partial segment.
7194 goto just_return_nolock;
7197 KASSERT(sb_offset >= 0, ("%s: sack block to the left of una : %d",
7198 __func__, sb_offset));
7202 TCPSTAT_INC(tcps_sack_rexmits);
7203 TCPSTAT_ADD(tcps_sack_rexmit_bytes,
7204 min(len, tp->t_maxseg));
7205 counter_u64_add(rack_rtm_prr_retran, 1);
7208 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) {
7209 /* we are retransmitting the fin */
7213 * When retransmitting data do *not* include the
7214 * FIN. This could happen from a TLP probe.
7221 rack->r_ctl.rc_rsm_at_retran = rsm;
7224 * Get standard flags, and add SYN or FIN if requested by 'hidden' state flags.
7227 if (tp->t_flags & TF_NEEDFIN)
7229 if (tp->t_flags & TF_NEEDSYN)
7231 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) {
7233 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
7235 kern_prefetch(end_rsm, &prefetch_rsm);
7240 * If in persist timeout with window of 0, send 1 byte. Otherwise,
7241 * if window is small but nonzero and the timer has expired, we
7242 * will send what we can and go to transmit state.
7244 if (tp->t_flags & TF_FORCEDATA) {
7247 * If we still have some data to send, then clear
7248 * the FIN bit. Usually this would happen below
7249 * when it realizes that we aren't sending all the
7250 * data. However, if we have exactly 1 byte of
7251 * unsent data, then it won't clear the FIN bit
7252 * below, and if we are in persist state, we wind up
7253 * sending the packet without recording that we sent the FIN bit.
7256 * We can't just blindly clear the FIN bit, because
7257 * if we don't have any more data to send then the
7258 * probe will be the FIN itself.
7260 if (sb_offset < sbused(sb))
7264 if (rack->rc_in_persist)
7265 rack_exit_persist(tp, rack);
7267 * If we are dropping persist mode then we need to
7268 * correct snd_nxt/snd_max and off.
7270 tp->snd_nxt = tp->snd_max;
7271 sb_offset = tp->snd_nxt - tp->snd_una;
7275 * If snd_nxt == snd_max and we have transmitted a FIN, the
7276 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a
7277 * negative length. This can also occur when TCP opens up its
7278 * congestion window while receiving additional duplicate acks after
7279 * fast-retransmit because TCP will reset snd_nxt to snd_max after
7280 * the fast-retransmit.
7282 * In the normal retransmit-FIN-only case, however, snd_nxt will be
7283 * set to snd_una, the sb_offset will be 0, and the length may wind
7286 * If sack_rxmit is true we are retransmitting from the scoreboard
7287 * in which case len is already set.
7289 if (sack_rxmit == 0) {
7292 avail = sbavail(sb);
7293 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail)
7294 sb_offset = tp->snd_nxt - tp->snd_una;
7297 if (IN_RECOVERY(tp->t_flags) == 0) {
7298 if (rack->r_ctl.rc_tlp_new_data) {
7299 /* TLP is forcing out new data */
7300 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) {
7301 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset);
7303 if (rack->r_ctl.rc_tlp_new_data > tp->snd_wnd)
7306 len = rack->r_ctl.rc_tlp_new_data;
7307 rack->r_ctl.rc_tlp_new_data = 0;
7310 if (sendwin > avail) {
7311 /* use the available */
7312 if (avail > sb_offset) {
7313 len = (int32_t)(avail - sb_offset);
7318 if (sendwin > sb_offset) {
7319 len = (int32_t)(sendwin - sb_offset);
7326 uint32_t outstanding;
7329 * We are inside of a SACK recovery episode and are
7330 * sending new data, having retransmitted all the
7331 * data possible so far in the scoreboard.
7333 outstanding = tp->snd_max - tp->snd_una;
7334 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd)
7336 else if (avail > sb_offset)
7337 len = avail - sb_offset;
7341 if (len > rack->r_ctl.rc_prr_sndcnt)
7342 len = rack->r_ctl.rc_prr_sndcnt;
7346 counter_u64_add(rack_rtm_prr_newdata, 1);
7349 if (len > tp->t_maxseg) {
7351 * We should never send more than a MSS when
7352 * retransmitting or sending new data in prr
7353 * mode unless the override flag is on. Most
7354 * likely the PRR algorithm is not going to
7355 * let us send a lot as well :-)
7357 if (rack->r_ctl.rc_prr_sendalot == 0)
7359 } else if (len < tp->t_maxseg) {
7361 * Do we send any? The idea here is if the
7362 * send empties the socket buffer we want to
7363 * do it. However if not then lets just wait
7364 * for our prr_sndcnt to get bigger.
7368 leftinsb = sbavail(sb) - sb_offset;
7369 if (leftinsb > len) {
7370 /* This send does not empty the sb */
7376 if (prefetch_so_done == 0) {
7377 kern_prefetch(so, &prefetch_so_done);
7378 prefetch_so_done = 1;
7381 * Lop off SYN bit if it has already been sent. However, if this is
7382 * SYN-SENT state and if segment contains data and if we don't know
7383 * that foreign host supports TAO, suppress sending segment.
7385 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) &&
7386 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) {
7387 if (tp->t_state != TCPS_SYN_RECEIVED)
7390 * When sending additional segments following a TFO SYN|ACK,
7391 * do not include the SYN bit.
7393 if (IS_FASTOPEN(tp->t_flags) &&
7394 (tp->t_state == TCPS_SYN_RECEIVED))
7399 * Be careful not to send data and/or FIN on SYN segments. This
7400 * measure is needed to prevent interoperability problems with not
7401 * fully conformant TCP implementations.
7403 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
7408 * On TFO sockets, ensure no data is sent in the following cases:
7410 * - When retransmitting SYN|ACK on a passively-created socket
7412 * - When retransmitting SYN on an actively created socket
7414 * - When sending a zero-length cookie (cookie request) on an
7415 * actively created socket
7417 * - When the socket is in the CLOSED state (RST is being sent)
7419 if (IS_FASTOPEN(tp->t_flags) &&
7420 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
7421 ((tp->t_state == TCPS_SYN_SENT) &&
7422 (tp->t_tfo_client_cookie_len == 0)) ||
7425 /* Without fast-open there should never be data sent on a SYN */
7426 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags)))
7430 * If FIN has been sent but not acked, but we haven't been
7431 * called to retransmit, len will be < 0. Otherwise, window
7432 * shrank after we sent into it. If window shrank to 0,
7433 * cancel pending retransmit, pull snd_nxt back to (closed)
7434 * window, and set the persist timer if it isn't already
7435 * going. If the window didn't close completely, just wait
7438 * We also do a general check here to ensure that we will
7439 * set the persist timer when we have data to send, but a
7440 * 0-byte window. This makes sure the persist timer is set
7441 * even if the packet hits one of the "goto send" lines below.
7445 if ((tp->snd_wnd == 0) &&
7446 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
7447 (sb_offset < (int)sbavail(sb))) {
7448 tp->snd_nxt = tp->snd_una;
7449 rack_enter_persist(tp, rack, cts);
7452 /* len will be >= 0 after this point. */
7453 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
7454 tcp_sndbuf_autoscale(tp, so, sendwin);
7456 * Decide if we can use TCP Segmentation Offloading (if supported by hardware).
7459 * TSO may only be used if we are in a pure bulk sending state. The
7460 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
7461 * options prevent using TSO. With TSO the TCP header is the same
7462 * (except for the sequence number) for all generated packets. This
7463 * makes it impossible to transmit any options which vary per
7464 * generated segment or packet.
7466 * IPv4 handling has a clear separation of ip options and ip header
7467 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
7468 * the right thing below to provide length of just ip options and thus
7469 * checking for ipoptlen is enough to decide if ip options are present.
7474 ipoptlen = ip6_optlen(tp->t_inpcb);
7477 if (tp->t_inpcb->inp_options)
7478 ipoptlen = tp->t_inpcb->inp_options->m_len -
7479 offsetof(struct ipoption, ipopt_list);
7482 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
7484 * Pre-calculate here as we save another lookup into the darknesses
7485 * of IPsec that way and can actually decide if TSO is ok.
7488 if (isipv6 && IPSEC_ENABLED(ipv6))
7489 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb);
7495 if (IPSEC_ENABLED(ipv4))
7496 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb);
7500 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
7501 ipoptlen += ipsec_optlen;
7503 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > tp->t_maxseg &&
7504 (tp->t_port == 0) &&
7505 ((tp->t_flags & TF_SIGNATURE) == 0) &&
7506 tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
7510 uint32_t outstanding;
7512 outstanding = tp->snd_max - tp->snd_una;
7513 if (tp->t_flags & TF_SENTFIN) {
7515 * If we sent a fin, snd_max is 1 higher than
7520 if (outstanding > 0) {
7522 * This is sub-optimal. We only send a stand alone
7523 * FIN on its own segment.
7525 if (flags & TH_FIN) {
7529 } else if (sack_rxmit) {
7530 if ((rsm->r_flags & RACK_HAS_FIN) == 0)
7533 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
7538 recwin = sbspace(&so->so_rcv);
7541 * Sender silly window avoidance. We transmit under the following
7542 * conditions when len is non-zero:
7544 * - We have a full segment (or more with TSO) - This is the last
7545 * buffer in a write()/send() and we are either idle or running
7546 * NODELAY - we've timed out (e.g. persist timer) - we have more
7547 * than 1/2 the maximum send window's worth of data (the receiver
7548 * may be limiting the window size) - we need to retransmit
7551 if (len >= tp->t_maxseg) {
7556 * NOTE! on localhost connections an 'ack' from the remote
7557 * end may occur synchronously with the output and cause us
7558 * to flush a buffer queued with moretocome. XXX
7561 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */
7562 (idle || (tp->t_flags & TF_NODELAY)) &&
7563 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(&so->so_snd)) &&
7564 (tp->t_flags & TF_NOPUSH) == 0) {
7568 if (tp->t_flags & TF_FORCEDATA) { /* typ. timeout case */
7572 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */
7575 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
7579 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */
7589 * Sending of standalone window updates.
7591 * Window updates are important when we close our window due to a
7592 * full socket buffer and are opening it again after the application
7593 * reads data from it. Once the window has opened again and the
7594 * remote end starts to send again the ACK clock takes over and
7595 * provides the most current window information.
7597 * We must avoid the silly window syndrome whereas every read from
7598 * the receive buffer, no matter how small, causes a window update
7599 * to be sent. We also should avoid sending a flurry of window
7600 * updates when the socket buffer had queued a lot of data and the
7601 * application is doing small reads.
7603 * Prevent a flurry of pointless window updates by only sending an
7604 * update when we can increase the advertised window by more than
7605 * 1/4th of the socket buffer capacity. When the buffer is getting
7606 * full or is very small be more aggressive and send an update
7607 * whenever we can increase by two mss sized segments. In all other
7608 * situations the ACK's to new incoming data will carry further window increases.
7611 * Don't send an independent window update if a delayed ACK is
7612 * pending (it will get piggy-backed on it) or the remote side
7613 * already has done a half-close and won't send more data. Skip
7614 * this if the connection is in T/TCP half-open state.
7616 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
7617 !(tp->t_flags & TF_DELACK) &&
7618 !TCPS_HAVERCVDFIN(tp->t_state)) {
7620 * "adv" is the amount we could increase the window, taking
7621 * into account that we are limited by TCP_MAXWIN << tp->rcv_scale.
7627 adv = min(recwin, (long)TCP_MAXWIN << tp->rcv_scale);
7628 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
7629 oldwin = (tp->rcv_adv - tp->rcv_nxt);
7635 * If the new window size ends up being the same as the old
7636 * size when it is scaled, then don't force a window update.
7638 if (oldwin >> tp->rcv_scale == (adv + oldwin) >> tp->rcv_scale)
7641 if (adv >= (int32_t)(2 * tp->t_maxseg) &&
7642 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
7643 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) ||
7644 so->so_rcv.sb_hiwat <= 8 * tp->t_maxseg)) {
7648 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat)
7654 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW
7655 * is also a catch-all for the retransmit timer timeout case.
7657 if (tp->t_flags & TF_ACKNOW) {
7661 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) {
7665 if (SEQ_GT(tp->snd_up, tp->snd_una)) {
7670 * If our state indicates that FIN should be sent and we have not
7671 * yet done so, then we need to send.
7673 if (flags & TH_FIN) {
7674 if ((tp->t_flags & TF_SENTFIN) ||
7675 (((tp->t_flags & TF_SENTFIN) == 0) &&
7676 (tp->snd_nxt == tp->snd_una))) {
7682 * No reason to send a segment, just return.
7687 if (tot_len_this_send == 0)
7688 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1);
7689 rack_start_hpts_timer(rack, tp, cts, __LINE__, slot, tot_len_this_send, 1);
7690 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling);
7691 tp->t_flags &= ~TF_FORCEDATA;
7695 if (doing_tlp == 0) {
7697 * Data not a TLP, and it's not the rxt firing. If it is the
7698 * rxt firing, we want to leave the tlp_in_progress flag on
7699 * so we don't send another TLP. It has to be a rack timer
7700 * or normal send (response to acked data) to clear the tlp in progress flag.
7703 rack->rc_tlp_in_progress = 0;
7705 SOCKBUF_LOCK_ASSERT(sb);
7707 if (len >= tp->t_maxseg)
7708 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
7710 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
7713 * Before ESTABLISHED, force sending of initial options unless TCP
7714 * set not to do any options. NOTE: we assume that the IP/TCP header
7715 * plus TCP options always fit in a single mbuf, leaving room for a
7716 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr)
7717 * + optlen <= MCLBYTES
7722 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
7725 hdrlen = sizeof(struct tcpiphdr);
7728 * Compute options for segment. We only have to care about SYN and
7729 * established connection segments. Options for SYN-ACK segments
7730 * are handled in TCP syncache.
7733 if ((tp->t_flags & TF_NOOPT) == 0) {
7734 /* Maximum segment size. */
7735 if (flags & TH_SYN) {
7736 tp->snd_nxt = tp->iss;
7737 to.to_mss = tcp_mssopt(&inp->inp_inc);
7738 #ifdef NETFLIX_TCPOUDP
7740 to.to_mss -= V_tcp_udp_tunneling_overhead;
7742 to.to_flags |= TOF_MSS;
7745 * On SYN or SYN|ACK transmits on TFO connections,
7746 * only include the TFO option if it is not a
7747 * retransmit, as the presence of the TFO option may
7748 * have caused the original SYN or SYN|ACK to have
7749 * been dropped by a middlebox.
7751 if (IS_FASTOPEN(tp->t_flags) &&
7752 (tp->t_rxtshift == 0)) {
7753 if (tp->t_state == TCPS_SYN_RECEIVED) {
7754 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
7756 (u_int8_t *)&tp->t_tfo_cookie.server;
7757 to.to_flags |= TOF_FASTOPEN;
7759 } else if (tp->t_state == TCPS_SYN_SENT) {
7761 tp->t_tfo_client_cookie_len;
7763 tp->t_tfo_cookie.client;
7764 to.to_flags |= TOF_FASTOPEN;
7767 * If we wind up having more data to
7768 * send with the SYN than can fit in
7769 * one segment, don't send any more
7770 * until the SYN|ACK comes back from
7777 /* Window scaling. */
7778 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
7779 to.to_wscale = tp->request_r_scale;
7780 to.to_flags |= TOF_SCALE;
7783 if ((tp->t_flags & TF_RCVD_TSTMP) ||
7784 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
7785 to.to_tsval = cts + tp->ts_offset;
7786 to.to_tsecr = tp->ts_recent;
7787 to.to_flags |= TOF_TS;
7789 /* Set receive buffer autosizing timestamp. */
7790 if (tp->rfbuf_ts == 0 &&
7791 (so->so_rcv.sb_flags & SB_AUTOSIZE))
7792 tp->rfbuf_ts = tcp_ts_getticks();
7793 /* Selective ACK's. */
7795 to.to_flags |= TOF_SACKPERM;
7796 else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
7797 tp->rcv_numsacks > 0) {
7798 to.to_flags |= TOF_SACK;
7799 to.to_nsacks = tp->rcv_numsacks;
7800 to.to_sacks = (u_char *)tp->sackblks;
7802 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
7803 /* TCP-MD5 (RFC2385). */
7804 if (tp->t_flags & TF_SIGNATURE)
7805 to.to_flags |= TOF_SIGNATURE;
7806 #endif /* TCP_SIGNATURE */
7808 /* Processing the options. */
7809 hdrlen += optlen = tcp_addoptions(&to, opt);
7811 * If we wanted a TFO option to be added, but it was unable
7812 * to fit, ensure no data is sent.
7814 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie &&
7815 !(to.to_flags & TOF_FASTOPEN))
7818 #ifdef NETFLIX_TCPOUDP
7820 if (V_tcp_udp_tunneling_port == 0) {
7821 /* The port was removed?? */
7822 SOCKBUF_UNLOCK(&so->so_snd);
7823 return (EHOSTUNREACH);
7825 hdrlen += sizeof(struct udphdr);
7829 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
7830 ipoptlen += ipsec_optlen;
7834 * Adjust data length if insertion of options will bump the packet
7835 * length beyond the t_maxseg length. Clear the FIN bit because we
7836 * cut off the tail of the segment.
7838 if (len + optlen + ipoptlen > tp->t_maxseg) {
7839 if (flags & TH_FIN) {
7844 uint32_t if_hw_tsomax;
7848 /* extract TSO information */
7849 if_hw_tsomax = tp->t_tsomax;
7850 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
7851 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
7852 KASSERT(ipoptlen == 0,
7853 ("%s: TSO can't do IP options", __func__));
7856 * Check if we should limit by maximum payload length:
7859 if (if_hw_tsomax != 0) {
7860 /* compute maximum TSO length */
7861 max_len = (if_hw_tsomax - hdrlen -
7865 } else if (len > max_len) {
7871 * Prevent the last segment from being fractional
7872 * unless the send sockbuf can be emptied:
7874 max_len = (tp->t_maxseg - optlen);
7875 if ((sb_offset + len) < sbavail(sb)) {
7876 moff = len % (u_int)max_len;
7883 * In case there are too many small fragments, don't use TSO:
7886 if (len <= max_len) {
7892 * Send the FIN in a separate segment after the bulk
7893 * sending is done. We don't trust the TSO
7894 * implementations to clear the FIN flag on all but the last segment.
7897 if (tp->t_flags & TF_NEEDFIN)
7901 len = tp->t_maxseg - optlen - ipoptlen;
7906 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
7907 ("%s: len > IP_MAXPACKET", __func__));
7910 if (max_linkhdr + hdrlen > MCLBYTES)
7912 if (max_linkhdr + hdrlen > MHLEN)
7914 panic("tcphdr too big");
7918 * This KASSERT is here to catch edge cases at a well defined place.
7919 * Before, those had triggered (random) panic conditions further down.
7922 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
7927 * We have outstanding data, don't send a FIN by itself!
7932 * Grab a header mbuf, attaching a copy of data to be transmitted,
7933 * and initialize the header from the template for sends on this connection.
7940 if (rack->rc_pace_max_segs)
7941 max_val = rack->rc_pace_max_segs * tp->t_maxseg;
7945 * We allow a limit on sending with hptsi.
7947 if (len > max_val) {
7951 if (MHLEN < hdrlen + max_linkhdr)
7952 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
7955 m = m_gethdr(M_NOWAIT, MT_DATA);
7963 m->m_data += max_linkhdr;
7967 * Start the m_copy functions from the closest mbuf to the
7968 * sb_offset in the socket buffer chain.
7970 mb = sbsndptr_noadv(sb, sb_offset, &moff);
7971 if (len <= MHLEN - hdrlen - max_linkhdr) {
7972 m_copydata(mb, moff, (int)len,
7973 mtod(m, caddr_t)+hdrlen);
7974 if (SEQ_LT(tp->snd_nxt, tp->snd_max))
7975 sbsndptr_adv(sb, mb, len);
7978 struct sockbuf *msb;
7980 if (SEQ_LT(tp->snd_nxt, tp->snd_max))
7984 m->m_next = tcp_m_copym(mb, moff, &len,
7985 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb);
7986 if (len <= (tp->t_maxseg - optlen)) {
7988 * Must have run out of mbufs for the copy;
7989 * shorten it to no longer need TSO. Let's
7990 * not set sendalot since we are low on mbufs.
7995 if (m->m_next == NULL) {
	if ((tp->t_flags & TF_FORCEDATA) && len == 1) {
		TCPSTAT_INC(tcps_sndprobe);
#ifdef NETFLIX_STATS
		if (SEQ_LT(tp->snd_nxt, tp->snd_max))
			stats_voi_update_abs_u32(tp->t_stats,
			    VOI_TCP_RETXPB, len);
			stats_voi_update_abs_u64(tp->t_stats,
	} else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
		if (rsm && (rsm->r_flags & RACK_TLP)) {
			/*
			 * TLP should not count in the retransmit count, but
			 */
			counter_u64_add(rack_tlp_retran, 1);
			counter_u64_add(rack_tlp_retran_bytes, len);
		tp->t_sndrexmitpack++;
		TCPSTAT_INC(tcps_sndrexmitpack);
		TCPSTAT_ADD(tcps_sndrexmitbyte, len);
#ifdef NETFLIX_STATS
		stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
		TCPSTAT_INC(tcps_sndpack);
		TCPSTAT_ADD(tcps_sndbyte, len);
#ifdef NETFLIX_STATS
		stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
	/*
	 * If we're sending everything we've got, set PUSH. (This will keep
	 * those implementations happy which only give data to the user when
	 * a buffer fills or a PUSH comes in.)
	 */
	if (sb_offset + len == sbused(sb) &&
	/*
	 * Are we doing hptsi? If so we must calculate the slot. We only do
	 * hptsi in ESTABLISHED and with no RESET being sent where we have
	 * data to send.
	 */
	if (((tp->t_state == TCPS_ESTABLISHED) ||
	    (tp->t_state == TCPS_CLOSE_WAIT) ||
	    ((tp->t_state == TCPS_FIN_WAIT_1) &&
	    ((tp->t_flags & TF_SENTFIN) == 0) &&
	    ((flags & TH_FIN) == 0))) &&
	    ((flags & TH_RST) == 0) &&
	    (rack->rc_always_pace)) {
		/*
		 * We use the most optimistic possible cwnd/srtt for
		 * sending calculations. This will make our
		 * calculation anticipate getting more through
		 * quicker than possible. But that's OK; we don't want
		 * the peer to have a gap in data sending.
		 */
		uint32_t srtt, cwnd, tr_perms = 0;

		if (rack->r_ctl.rc_rack_min_rtt)
			srtt = rack->r_ctl.rc_rack_min_rtt;
			srtt = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT));
		if (rack->r_ctl.rc_rack_largest_cwnd)
			cwnd = rack->r_ctl.rc_rack_largest_cwnd;
			cwnd = tp->snd_cwnd;
		tr_perms = cwnd / srtt;
		if (tr_perms == 0) {
			tr_perms = tp->t_maxseg;
		tot_len_this_send += len;
		/*
		 * Calculate how long this will take to drain. If the
		 * calculation comes out to zero, that's OK; we will use
		 * sendalot to possibly spin around for more, increasing
		 * tot_len_this_send to the point that it's going to require
		 * a pace, or we hit the cwnd. In that case we are just
		 * waiting for
		 */
		slot = tot_len_this_send / tr_perms;
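		/*
		 * Worked example of the pacing math above (hypothetical
		 * numbers, not taken from the source): with
		 * cwnd = 14480 bytes and srtt = 10, tr_perms =
		 * 14480 / 10 = 1448 bytes per unit of srtt time. A
		 * tot_len_this_send of 4344 bytes then yields
		 * slot = 4344 / 1448 = 3 time units until the next send;
		 * the exact unit follows whatever srtt is expressed in.
		 */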
		/* Now do we reduce the time so we don't run dry? */
		if (slot && rack->rc_pace_reduce) {
			reduce = (slot / rack->rc_pace_reduce);
			if (reduce < slot) {
		if (rack->r_enforce_min_pace &&
		    (tot_len_this_send >= (rack->r_min_pace_seg_thresh * tp->t_maxseg))) {
			/* We are enforcing a minimum pace time of 1ms */
			slot = rack->r_enforce_min_pace;
	if (tp->t_flags & TF_ACKNOW)
		TCPSTAT_INC(tcps_sndacks);
	else if (flags & (TH_SYN | TH_FIN | TH_RST))
		TCPSTAT_INC(tcps_sndctrl);
	else if (SEQ_GT(tp->snd_up, tp->snd_una))
		TCPSTAT_INC(tcps_sndurg);
		TCPSTAT_INC(tcps_sndwinup);
	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
	m->m_data += max_linkhdr;
	SOCKBUF_UNLOCK_ASSERT(sb);
	m->m_pkthdr.rcvif = (struct ifnet *)0;
	mac_inpcb_create_mbuf(inp, m);
	ip6 = mtod(m, struct ip6_hdr *);
#ifdef NETFLIX_TCPOUDP
		udp = (struct udphdr *)((caddr_t)ip6 + ipoptlen + sizeof(struct ip6_hdr));
		udp->uh_sport = htons(V_tcp_udp_tunneling_port);
		udp->uh_dport = tp->t_port;
		ulen = hdrlen + len - sizeof(struct ip6_hdr);
		udp->uh_ulen = htons(ulen);
		th = (struct tcphdr *)(udp + 1);
		th = (struct tcphdr *)(ip6 + 1);
	tcpip_fillheaders(inp, ip6, th);
	ip = mtod(m, struct ip *);
	ipov = (struct ipovly *)ip;
#ifdef NETFLIX_TCPOUDP
		udp = (struct udphdr *)((caddr_t)ip + ipoptlen + sizeof(struct ip));
		udp->uh_sport = htons(V_tcp_udp_tunneling_port);
		udp->uh_dport = tp->t_port;
		ulen = hdrlen + len - sizeof(struct ip);
		udp->uh_ulen = htons(ulen);
		th = (struct tcphdr *)(udp + 1);
		th = (struct tcphdr *)(ip + 1);
	tcpip_fillheaders(inp, ip, th);
	/*
	 * Fill in fields, remembering maximum advertised window for use in
	 * delaying messages about window sizes. If resending a FIN, be sure
	 * not to use a new sequence number.
	 */
	if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
	    tp->snd_nxt == tp->snd_max)
	/*
	 * If we are starting a connection, send ECN setup SYN packet. If we
	 * are on a retransmit, we may resend those bits a number of times
	 */
	if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) {
		if (tp->t_rxtshift >= 1) {
			if (tp->t_rxtshift <= V_tcp_ecn_maxretries)
				flags |= TH_ECE | TH_CWR;
			flags |= TH_ECE | TH_CWR;
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (tp->t_flags & TF_ECN_PERMIT)) {
		/*
		 * If the peer has ECN, mark data packets with ECN-capable
		 * transmission (ECT). Ignore pure ack packets,
		 * retransmissions and window probes.
		 */
		if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) &&
		    !((tp->t_flags & TF_FORCEDATA) && len == 1)) {
				ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
				ip->ip_tos |= IPTOS_ECN_ECT0;
			TCPSTAT_INC(tcps_ecn_ect0);
		/*
		 * Reply with proper ECN notifications.
		 */
		if (tp->t_flags & TF_ECN_SND_CWR) {
			tp->t_flags &= ~TF_ECN_SND_CWR;
		if (tp->t_flags & TF_ECN_SND_ECE)
	/*
	 * If we are doing retransmissions, then snd_nxt will not reflect
	 * the first unsent octet. For ACK-only packets, we do not want the
	 * sequence number of the retransmitted packet, we want the sequence
	 * number of the next unsent octet. So, if there is no data (and no
	 * SYN or FIN), use snd_max instead of snd_nxt when filling in
	 * ti_seq. But if we are in persist state, snd_max might reflect
	 * one byte beyond the right edge of the window, so use snd_nxt in
	 * that case, since we know we aren't doing a retransmission.
	 * (retransmit and persist are mutually exclusive...)
	 */
	if (sack_rxmit == 0) {
		if (len || (flags & (TH_SYN | TH_FIN)) ||
		    rack->rc_in_persist) {
			th->th_seq = htonl(tp->snd_nxt);
			rack_seq = tp->snd_nxt;
		} else if (flags & TH_RST) {
			/*
			 * For a Reset send the last cum ack in sequence
			 * (this, like any other choice, may still generate
			 * a challenge ack, if an ack-update packet is in
			 */
			th->th_seq = htonl(tp->snd_una);
			rack_seq = tp->snd_una;
			th->th_seq = htonl(tp->snd_max);
			rack_seq = tp->snd_max;
		th->th_seq = htonl(rsm->r_start);
		rack_seq = rsm->r_start;
	th->th_ack = htonl(tp->rcv_nxt);
		bcopy(opt, th + 1, optlen);
		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	/*
	 * Calculate receive window. Don't shrink window, but avoid silly
	 */
	if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
	    recwin < (long)tp->t_maxseg)
	if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
	    recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
		recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
	if (recwin > (long)TCP_MAXWIN << tp->rcv_scale)
		recwin = (long)TCP_MAXWIN << tp->rcv_scale;
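	/*
	 * Example of the clamping above (hypothetical values): with
	 * rcv_scale = 2 the advertised window can never exceed
	 * TCP_MAXWIN << 2 = 262140 bytes, because the 16-bit th_win field
	 * carries recwin >> rcv_scale and must fit in 65535.
	 */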
	/*
	 * According to RFC 1323 the window field in a SYN (i.e., a <SYN> or
	 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is
	 * handled in syncache.
	 */
		th->th_win = htons((u_short)
		    (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
		th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
	/*
	 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0
	 * window. This may cause the remote transmitter to stall. This
	 * flag tells soreceive() to disable delayed acknowledgements when
	 * draining the buffer. This can occur if the receiver is
	 * attempting to read more data than can be buffered prior to
	 * transmitting on the connection.
	 */
	if (th->th_win == 0) {
		tp->t_flags |= TF_RXWIN0SENT;
		tp->t_flags &= ~TF_RXWIN0SENT;
	if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
		th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
		th->th_flags |= TH_URG;
		/*
		 * If no urgent pointer to send, then we pull the urgent
		 * pointer to the left edge of the send window so that it
		 * doesn't drift into the send window on sequence number
		 */
		tp->snd_up = tp->snd_una;	/* drag it along */
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if (to.to_flags & TOF_SIGNATURE) {
		/*
		 * Calculate MD5 signature and put it into the place
		 * determined before.
		 * NOTE: since the TCP options buffer doesn't point into
		 * mbuf's data, calculate offset and use it.
		 */
		if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
		    (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
			/*
			 * Do not send segment if the calculation of MD5
			 * digest has failed.
			 */
	/*
	 * Put TCP length in extended header, and then checksum extended
	 */
	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
		/*
		 * ip6_plen need not be filled now; it will be filled
		 */
			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
			th->th_sum = htons(0);
			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in6_cksum_pseudo(ip6,
			    sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
#if defined(INET6) && defined(INET)
			m->m_pkthdr.csum_flags = CSUM_UDP;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
			th->th_sum = htons(0);
			m->m_pkthdr.csum_flags = CSUM_TCP;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
			    IPPROTO_TCP + len + optlen));
	/* IP version must be set here for ipv4/ipv6 checking later */
	KASSERT(ip->ip_v == IPVERSION,
	    ("%s: IP version incorrect: %d", __func__, ip->ip_v));
	/*
	 * Enable TSO and specify the size of the segments. The TCP
	 * pseudo-header checksum is always provided. XXX: Fixme: This is
	 * currently not the case for IPv6.
	 */
		KASSERT(len > tp->t_maxseg - optlen,
		    ("%s: len <= tso_segsz", __func__));
		m->m_pkthdr.csum_flags |= CSUM_TSO;
		m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
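		/*
		 * For instance (hypothetical values, not from the source):
		 * with t_maxseg = 1460 and optlen = 12 for timestamps, the
		 * hardware is told to cut tso_segsz = 1448-byte payloads,
		 * so each emitted segment still fits within t_maxseg once
		 * the per-segment options are prepended.
		 */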
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	KASSERT(len + hdrlen + ipoptlen - ipsec_optlen == m_length(m, NULL),
	    ("%s: mbuf chain shorter than expected: %d + %u + %u - %u != %u",
	    __func__, len, hdrlen, ipoptlen, ipsec_optlen, m_length(m, NULL)));
	KASSERT(len + hdrlen + ipoptlen == m_length(m, NULL),
	    ("%s: mbuf chain shorter than expected: %d + %u + %u != %u",
	    __func__, len, hdrlen, ipoptlen, m_length(m, NULL)));
	/* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
	hhook_run_tcp_est_out(tp, th, &to, len, tso);
	if (so->so_options & SO_DEBUG) {
		save = ipov->ih_len;
		ipov->ih_len = htons(m->m_pkthdr.len	/* - hdrlen +
							 * (th->th_off << 2) */ );
		tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
		ipov->ih_len = save;
#endif				/* TCPDEBUG */

	/* We're getting ready to send; log now. */
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
		if (rsm || sack_rxmit) {
			log.u_bbr.flex8 = 1;
			log.u_bbr.flex8 = 0;
		lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
		    len, &log, false, NULL, NULL, 0, NULL);
	/*
	 * Fill in IP length and desired time to live and send to IP level.
	 * There should be a better way to handle ttl and tos; we could keep
	 * them in the template, but need a way to checksum without them.
	 */
	/*
	 * m->m_pkthdr.len should have been set before checksum
	 * calculation, because in6_cksum() needs it.
	 */
		/*
		 * We separately set the hop limit for every segment, since
		 * the user might want to change the value via setsockopt.
		 * Also, the desired default hop limit might be changed via
		 * Neighbor
		 */
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
		/*
		 * Set the packet size here for the benefit of DTrace
		 * probes. ip6_output() will set it properly; it's supposed
		 * to include the option header lengths as well.
		 */
		ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);
			TCP_PROBE5(send, NULL, tp, ip6, tp, th);
		/* TODO: IPv6 IP6TOS_ECT bit on */
		error = ip6_output(m, tp->t_inpcb->in6p_outputopts,
		    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		if (error == EMSGSIZE && inp->inp_route6.ro_rt != NULL)
			mtu = inp->inp_route6.ro_rt->rt_mtu;
#if defined(INET) && defined(INET6)
		ip->ip_len = htons(m->m_pkthdr.len);
		if (inp->inp_vflag & INP_IPV6PROTO)
			ip->ip_ttl = in6_selecthlim(inp, NULL);
		/*
		 * If we do path MTU discovery, then we set DF on every
		 * packet. This might not be the best thing to do according
		 * to RFC3390 Section 2. However, the TCP hostcache
		 * mitigates the problem so it affects only the first TCP
		 * connection
		 *
		 * NB: Don't set DF on small MTU/MSS to have a safe
		 */
		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
			if (tp->t_port == 0 || len < V_tcp_minmss) {
				ip->ip_off |= htons(IP_DF);
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);
			TCP_PROBE5(send, NULL, tp, ip, tp, th);
		error = ip_output(m, tp->t_inpcb->inp_options, &inp->inp_route,
		    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0,
		if (error == EMSGSIZE && inp->inp_route.ro_rt != NULL)
			mtu = inp->inp_route.ro_rt->rt_mtu;
		lgb->tlb_errno = error;
	/*
	 * In transmit state, time the transmission and arrange for the
	 * retransmit. In persist state, just set snd_max.
	 */
		counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1);
	else if (len == 1) {
		counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1);
	} else if (len > 1) {
		idx = (len / tp->t_maxseg) + 3;
		if (idx >= TCP_MSS_ACCT_ATIMER)
			counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
			counter_u64_add(rack_out_size[idx], 1);
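		/*
		 * Illustrative bucketing (hypothetical values): a send of
		 * len = 4344 with t_maxseg = 1448 gives
		 * idx = (4344 / 1448) + 3 = 6, so buckets 3 and up hold
		 * data sends by segment count, with anything at or beyond
		 * TCP_MSS_ACCT_ATIMER folded into the last bucket.
		 */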
	if (sub_from_prr && (error == 0)) {
		rack->r_ctl.rc_prr_sndcnt -= len;
	rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, cts,
	if ((tp->t_flags & TF_FORCEDATA) == 0 ||
	    (rack->rc_in_persist == 0)) {
#ifdef NETFLIX_STATS
		tcp_seq startseq = tp->snd_nxt;

		/*
		 * Advance snd_nxt over sequence space of this segment.
		 */
		/* We don't log or do anything with errors */
		if (flags & (TH_SYN | TH_FIN)) {
			if (flags & TH_FIN) {
				tp->t_flags |= TF_SENTFIN;
		/* In the ENOBUFS case we do *not* update snd_max */
		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
			if (tp->snd_una == tp->snd_max) {
				/*
				 * Update the time we just added data since
				 * none was outstanding.
				 */
				rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
				tp->t_acktime = ticks;
			tp->snd_max = tp->snd_nxt;
#ifdef NETFLIX_STATS
			if (!(tp->t_flags & TF_GPUTINPROG) && len) {
				tp->t_flags |= TF_GPUTINPROG;
				tp->gput_seq = startseq;
				tp->gput_ack = startseq +
				    ulmin(sbavail(sb) - sb_offset, sendwin);
				tp->gput_ts = tcp_ts_getticks();
		/*
		 * Set retransmit timer if not currently set, and not doing
		 * a pure ack or a keep-alive probe. Initial value for
		 * retransmit timer is smoothed round-trip time + 2 *
		 * round-trip time variance. Initialize shift counter which
		 * is used for backoff of retransmit time.
		 */
		if ((tp->snd_wnd == 0) &&
		    TCPS_HAVEESTABLISHED(tp->t_state)) {
			/*
			 * If the persist timer was set above (right before
			 * the goto send) and still needs to be on, let's
			 * make sure all is canceled. If the persist timer
			 * is not running, we want to get it up.
			 */
			if (rack->rc_in_persist == 0) {
				rack_enter_persist(tp, rack, cts);
		/*
		 * Persist case: update snd_max, but since we are in persist
		 * mode (no window) we do not update snd_nxt.
		 */
		if (flags & TH_FIN) {
			tp->t_flags |= TF_SENTFIN;
		/* In the ENOBUFS case we do *not* update snd_max */
		if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max)) {
			if (tp->snd_una == tp->snd_max) {
				/*
				 * Update the time we just added data since
				 * none was outstanding.
				 */
				rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
				tp->t_acktime = ticks;
			tp->snd_max = tp->snd_nxt + len;
	SOCKBUF_UNLOCK_ASSERT(sb);	/* Check gotos. */
	/*
	 * Failures do not advance the seq counter above. For the case of
	 * ENOBUFS we will fall out and retry in 1ms with the hpts.
	 * Everything else will just have to retransmit
	 *
	 * In any case, we do not want to loop around for another send
	 * without a good reason.
	 */
		tp->t_flags &= ~TF_FORCEDATA;
		tp->t_softerror = error;
		/*
		 * Pace us right away to retry in some time
		 */
		slot = 1 + rack->rc_enobuf;
		if (rack->rc_enobuf < 255)
		if (slot > (rack->rc_rack_rtt / 2)) {
			slot = rack->rc_rack_rtt / 2;
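		/*
		 * Example of this backoff (hypothetical values): after
		 * three consecutive ENOBUFS returns rc_enobuf has grown to
		 * 3, so the retry is paced out slot = 1 + 3 = 4 time units,
		 * but never more than half of rc_rack_rtt (e.g. capped at
		 * 20 when rc_rack_rtt is 40).
		 */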
		counter_u64_add(rack_saw_enobuf, 1);
		/*
		 * For some reason the interface we used initially
		 * to send segments changed to another or lowered
		 * its MTU. If TSO was active we either got an
		 * interface without TSO capabilities or TSO was
		 * turned off. If we obtained mtu from ip_output()
		 * then update it and try again.
		 */
			tp->t_flags &= ~TF_TSO;
			tcp_mss_update(tp, -1, mtu, NULL, NULL);
		rack_start_hpts_timer(rack, tp, cts, __LINE__, slot, 0, 1);
		tp->t_flags &= ~TF_FORCEDATA;
		counter_u64_add(rack_saw_enetunreach, 1);
		if (TCPS_HAVERCVDSYN(tp->t_state)) {
			tp->t_softerror = error;
		rack_start_hpts_timer(rack, tp, cts, __LINE__, slot, 0, 1);
		tp->t_flags &= ~TF_FORCEDATA;
	rack->rc_enobuf = 0;
	TCPSTAT_INC(tcps_sndtotal);
	/*
	 * Data sent (as far as we can tell). If this advertises a larger
	 * window than any other segment, then remember the size of the
	 * advertised window. Any pending ACK has now been sent.
	 */
	if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
		tp->rcv_adv = tp->rcv_nxt + recwin;
	tp->last_ack_sent = tp->rcv_nxt;
	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
	rack->r_tlp_running = 0;
	if ((flags & TH_RST) || (would_have_fin == 1)) {
		/*
		 * We don't send again after a RST. We also do *not* send
		 * again if we would have had a FIN, but now have
		 */
		/* set the rack tcb into the slot N */
		counter_u64_add(rack_paced_segments, 1);
	} else if (sendalot) {
		counter_u64_add(rack_unpaced_segments, 1);
		tp->t_flags &= ~TF_FORCEDATA;
		counter_u64_add(rack_unpaced_segments, 1);
		tp->t_flags &= ~TF_FORCEDATA;
	rack_start_hpts_timer(rack, tp, cts, __LINE__, slot, tot_len_this_send, 1);
/*
 * rack_ctloutput() must drop the inpcb lock before performing copyin on
 * socket option arguments. When it re-acquires the lock after the copy, it
 * has to revalidate that the connection is still valid for the socket
 */
rack_set_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
	int32_t error = 0, optval;

	switch (sopt->sopt_name) {
	case TCP_RACK_PROP_RATE:
	case TCP_RACK_TLP_REDUCE:
	case TCP_RACK_EARLY_RECOV:
	case TCP_RACK_PACE_ALWAYS:
	case TCP_RACK_PACE_REDUCE:
	case TCP_RACK_PACE_MAX_SEG:
	case TCP_RACK_PRR_SENDALOT:
	case TCP_RACK_MIN_TO:
	case TCP_RACK_EARLY_SEG:
	case TCP_RACK_REORD_THRESH:
	case TCP_RACK_REORD_FADE:
	case TCP_RACK_TLP_THRESH:
	case TCP_RACK_PKT_DELAY:
	case TCP_RACK_TLP_USE:
	case TCP_RACK_TLP_INC_VAR:
	case TCP_RACK_IDLE_REDUCE_HIGH:
	case TCP_RACK_MIN_PACE:
	case TCP_RACK_MIN_PACE_SEG:
	case TCP_BBR_RACK_RTT_USE:
	case TCP_DATA_AFTER_CLOSE:
		return (tcp_default_ctloutput(so, sopt, inp, tp));
	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		return (ECONNRESET);
	tp = intotcpcb(inp);
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	switch (sopt->sopt_name) {
	case TCP_RACK_PROP_RATE:
		if ((optval <= 0) || (optval >= 100)) {
		RACK_OPTS_INC(tcp_rack_prop_rate);
		rack->r_ctl.rc_prop_rate = optval;
	case TCP_RACK_TLP_USE:
		if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) {
		RACK_OPTS_INC(tcp_tlp_use);
		rack->rack_tlp_threshold_use = optval;
		/* RACK proportional rate reduction (bool) */
		RACK_OPTS_INC(tcp_rack_prop);
		rack->r_ctl.rc_prop_reduce = optval;
	case TCP_RACK_TLP_REDUCE:
		/* RACK TLP cwnd reduction (bool) */
		RACK_OPTS_INC(tcp_rack_tlp_reduce);
		rack->r_ctl.rc_tlp_cwnd_reduce = optval;
	case TCP_RACK_EARLY_RECOV:
		/* Should recovery happen early (bool) */
		RACK_OPTS_INC(tcp_rack_early_recov);
		rack->r_ctl.rc_early_recovery = optval;
	case TCP_RACK_PACE_ALWAYS:
		/* Use the always pace method (bool) */
		RACK_OPTS_INC(tcp_rack_pace_always);
			rack->rc_always_pace = 1;
			rack->rc_always_pace = 0;
	case TCP_RACK_PACE_REDUCE:
		/* RACK Hptsi reduction factor (divisor) */
		RACK_OPTS_INC(tcp_rack_pace_reduce);
			/* Must be non-zero */
			rack->rc_pace_reduce = optval;
	case TCP_RACK_PACE_MAX_SEG:
		/* Max segments in a pace */
		RACK_OPTS_INC(tcp_rack_max_seg);
		rack->rc_pace_max_segs = optval;
	case TCP_RACK_PRR_SENDALOT:
		/* Allow PRR to send more than one seg */
		RACK_OPTS_INC(tcp_rack_prr_sendalot);
		rack->r_ctl.rc_prr_sendalot = optval;
	case TCP_RACK_MIN_TO:
		/* Minimum time between rack t-o's in ms */
		RACK_OPTS_INC(tcp_rack_min_to);
		rack->r_ctl.rc_min_to = optval;
	case TCP_RACK_EARLY_SEG:
		/* If early recovery max segments */
		RACK_OPTS_INC(tcp_rack_early_seg);
		rack->r_ctl.rc_early_recovery_segs = optval;
	case TCP_RACK_REORD_THRESH:
		/* RACK reorder threshold (shift amount) */
		RACK_OPTS_INC(tcp_rack_reord_thresh);
		if ((optval > 0) && (optval < 31))
			rack->r_ctl.rc_reorder_shift = optval;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		RACK_OPTS_INC(tcp_rack_reord_fade);
		rack->r_ctl.rc_reorder_fade = optval;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		RACK_OPTS_INC(tcp_rack_tlp_thresh);
			rack->r_ctl.rc_tlp_threshold = optval;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		RACK_OPTS_INC(tcp_rack_pkt_delay);
		rack->r_ctl.rc_pkt_delay = optval;
	case TCP_RACK_TLP_INC_VAR:
		/* Does TLP include rtt variance in t-o */
		RACK_OPTS_INC(tcp_rack_tlp_inc_var);
		rack->r_ctl.rc_prr_inc_var = optval;
	case TCP_RACK_IDLE_REDUCE_HIGH:
		RACK_OPTS_INC(tcp_rack_idle_reduce_high);
			rack->r_idle_reduce_largest = 1;
			rack->r_idle_reduce_largest = 0;
			tp->t_delayed_ack = 0;
			tp->t_delayed_ack = 1;
		if (tp->t_flags & TF_DELACK) {
			tp->t_flags &= ~TF_DELACK;
			tp->t_flags |= TF_ACKNOW;
	case TCP_RACK_MIN_PACE:
		RACK_OPTS_INC(tcp_rack_min_pace);
			rack->r_enforce_min_pace = 3;
			rack->r_enforce_min_pace = optval;
	case TCP_RACK_MIN_PACE_SEG:
		RACK_OPTS_INC(tcp_rack_min_pace_seg);
			rack->r_min_pace_seg_thresh = 15;
			rack->r_min_pace_seg_thresh = optval;
	case TCP_BBR_RACK_RTT_USE:
		if ((optval != USE_RTT_HIGH) &&
		    (optval != USE_RTT_LOW) &&
		    (optval != USE_RTT_AVG))
		rack->r_ctl.rc_rate_sample_method = optval;
	case TCP_DATA_AFTER_CLOSE:
			rack->rc_allow_data_af_clo = 1;
			rack->rc_allow_data_af_clo = 0;
		return (tcp_default_ctloutput(so, sopt, inp, tp));
#ifdef NETFLIX_STATS
	tcp_log_socket_option(tp, sopt->sopt_name, optval, error);
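	/*
	 * Hedged userland sketch (illustrative only, not part of this
	 * file): once a connection is on the rack stack, the options
	 * handled above are ordinary TCP-level socket options, so a
	 * minimal caller could look like the following, assuming the
	 * TCP_RACK_* constants are visible from <netinet/tcp.h> on the
	 * build host:
	 *
	 *	int on = 1;
	 *	if (setsockopt(fd, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS,
	 *	    &on, sizeof(on)) == -1)
	 *		warn("setsockopt(TCP_RACK_PACE_ALWAYS)");
	 *
	 * Reading a knob back goes through rack_get_sockopt() below via
	 * getsockopt() with the same option name.
	 */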
rack_get_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
	int32_t error, optval;

	/*
	 * Because all our options are either boolean or an int, we can just
	 * pull everything into optval and then unlock and copy. If we ever
	 * add an option that is not an int, then this will have quite an
	 * impact on this routine.
	 */
	switch (sopt->sopt_name) {
	case TCP_RACK_PROP_RATE:
		optval = rack->r_ctl.rc_prop_rate;
		/* RACK proportional rate reduction (bool) */
		optval = rack->r_ctl.rc_prop_reduce;
	case TCP_RACK_TLP_REDUCE:
		/* RACK TLP cwnd reduction (bool) */
		optval = rack->r_ctl.rc_tlp_cwnd_reduce;
	case TCP_RACK_EARLY_RECOV:
		/* Should recovery happen early (bool) */
		optval = rack->r_ctl.rc_early_recovery;
	case TCP_RACK_PACE_REDUCE:
		/* RACK Hptsi reduction factor (divisor) */
		optval = rack->rc_pace_reduce;
	case TCP_RACK_PACE_MAX_SEG:
		/* Max segments in a pace */
		optval = rack->rc_pace_max_segs;
	case TCP_RACK_PACE_ALWAYS:
		/* Use the always pace method */
		optval = rack->rc_always_pace;
	case TCP_RACK_PRR_SENDALOT:
		/* Allow PRR to send more than one seg */
		optval = rack->r_ctl.rc_prr_sendalot;
	case TCP_RACK_MIN_TO:
		/* Minimum time between rack t-o's in ms */
		optval = rack->r_ctl.rc_min_to;
	case TCP_RACK_EARLY_SEG:
		/* If early recovery max segments */
		optval = rack->r_ctl.rc_early_recovery_segs;
	case TCP_RACK_REORD_THRESH:
		/* RACK reorder threshold (shift amount) */
		optval = rack->r_ctl.rc_reorder_shift;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		optval = rack->r_ctl.rc_reorder_fade;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
	case TCP_RACK_TLP_INC_VAR:
		/* Does TLP include rtt variance in t-o */
		optval = rack->r_ctl.rc_prr_inc_var;
	case TCP_RACK_IDLE_REDUCE_HIGH:
		optval = rack->r_idle_reduce_largest;
	case TCP_RACK_MIN_PACE:
		optval = rack->r_enforce_min_pace;
	case TCP_RACK_MIN_PACE_SEG:
		optval = rack->r_min_pace_seg_thresh;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		optval = tp->t_delayed_ack;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		return (tcp_default_ctloutput(so, sopt, inp, tp));
	error = sooptcopyout(sopt, &optval, sizeof optval);
rack_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp)
	int32_t error = EINVAL;
	struct tcp_rack *rack;

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (sopt->sopt_dir == SOPT_SET) {
		return (rack_set_sockopt(so, sopt, inp, tp, rack));
	} else if (sopt->sopt_dir == SOPT_GET) {
		return (rack_get_sockopt(so, sopt, inp, tp, rack));

struct tcp_function_block __tcp_rack = {
	.tfb_tcp_block_name = __XSTRING(STACKNAME),
	.tfb_tcp_output = rack_output,
	.tfb_tcp_do_segment = rack_do_segment,
	.tfb_tcp_hpts_do_segment = rack_hpts_do_segment,
	.tfb_tcp_ctloutput = rack_ctloutput,
	.tfb_tcp_fb_init = rack_init,
	.tfb_tcp_fb_fini = rack_fini,
	.tfb_tcp_timer_stop_all = rack_stopall,
	.tfb_tcp_timer_activate = rack_timer_activate,
	.tfb_tcp_timer_active = rack_timer_active,
	.tfb_tcp_timer_stop = rack_timer_stop,
	.tfb_tcp_rexmit_tmr = rack_remxt_tmr,
	.tfb_tcp_handoff_ok = rack_handoff_ok

static const char *rack_stack_names[] = {
	__XSTRING(STACKNAME),
	__XSTRING(STACKALIAS),

rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
	memset(mem, 0, size);

rack_dtor(void *mem, int32_t size, void *arg)

static bool rack_mod_inited = false;

tcp_addrack(module_t mod, int32_t type, void *data)
	rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
	    sizeof(struct rack_sendmap),
	    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
	rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
	    sizeof(struct tcp_rack),
	    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
	sysctl_ctx_init(&rack_sysctl_ctx);
	rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
	    __XSTRING(STACKNAME),
	if (rack_sysctl_root == NULL) {
		printf("Failed to add sysctl node\n");
	rack_init_sysctls();
	num_stacks = nitems(rack_stack_names);
	err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
	    rack_stack_names, &num_stacks);
		printf("Failed to register %s stack name for "
		    "%s module\n", rack_stack_names[num_stacks],
		    __XSTRING(MODNAME));
		sysctl_ctx_free(&rack_sysctl_ctx);
		uma_zdestroy(rack_zone);
		uma_zdestroy(rack_pcb_zone);
		rack_counter_destroy();
		printf("Failed to register rack module -- err:%d\n", err);
	rack_mod_inited = true;
	err = deregister_tcp_functions(&__tcp_rack, true, false);
	err = deregister_tcp_functions(&__tcp_rack, false, true);
	if (rack_mod_inited) {
		uma_zdestroy(rack_zone);
		uma_zdestroy(rack_pcb_zone);
		sysctl_ctx_free(&rack_sysctl_ctx);
		rack_counter_destroy();
		rack_mod_inited = false;
	return (EOPNOTSUPP);

static moduledata_t tcp_rack = {
	.name = __XSTRING(MODNAME),
	.evhand = tcp_addrack,

MODULE_VERSION(MODNAME, 1);
DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
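
/*
 * Usage note (illustrative, based on the registration above rather than
 * on anything guaranteed by this file): on a kernel that provides the
 * tcphpts dependency, the module would typically be loaded with
 * "kldload tcp_rack", after which the stack can be selected system-wide
 * through the net.inet.tcp.functions_default sysctl or per socket with
 * the TCP_FUNCTION_BLK socket option, using one of the names registered
 * in rack_stack_names[].
 */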