2 * Copyright (c) 2016-2020 Netflix, Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
31 #include "opt_inet6.h"
32 #include "opt_ipsec.h"
33 #include "opt_tcpdebug.h"
34 #include "opt_ratelimit.h"
35 #include <sys/param.h>
37 #include <sys/module.h>
38 #include <sys/kernel.h>
40 #include <sys/hhook.h>
43 #include <sys/malloc.h>
45 #include <sys/mutex.h>
47 #include <sys/proc.h> /* for proc0 declaration */
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/sysctl.h>
51 #include <sys/systm.h>
53 #include <sys/qmath.h>
55 #include <sys/stats.h> /* Must come after qmath.h and tree.h */
59 #include <sys/refcount.h>
60 #include <sys/queue.h>
61 #include <sys/tim_filter.h>
63 #include <sys/kthread.h>
64 #include <sys/kern_prefetch.h>
65 #include <sys/protosw.h>
69 #include <net/route.h>
70 #include <net/route/nhop.h>
73 #define TCPSTATES /* for logging */
75 #include <netinet/in.h>
76 #include <netinet/in_kdtrace.h>
77 #include <netinet/in_pcb.h>
78 #include <netinet/ip.h>
79 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
80 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
81 #include <netinet/ip_var.h>
82 #include <netinet/ip6.h>
83 #include <netinet6/in6_pcb.h>
84 #include <netinet6/ip6_var.h>
85 #include <netinet/tcp.h>
87 #include <netinet/tcp_fsm.h>
88 #include <netinet/tcp_log_buf.h>
89 #include <netinet/tcp_seq.h>
90 #include <netinet/tcp_timer.h>
91 #include <netinet/tcp_var.h>
92 #include <netinet/tcp_hpts.h>
93 #include <netinet/tcp_ratelimit.h>
94 #include <netinet/tcpip.h>
95 #include <netinet/cc/cc.h>
96 #include <netinet/tcp_fastopen.h>
97 #include <netinet/tcp_lro.h>
98 #ifdef NETFLIX_SHARED_CWND
99 #include <netinet/tcp_shared_cwnd.h>
102 #include <netinet/tcp_debug.h>
103 #endif /* TCPDEBUG */
105 #include <netinet/tcp_offload.h>
108 #include <netinet6/tcp6_var.h>
111 #include <netipsec/ipsec_support.h>
113 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
114 #include <netipsec/ipsec.h>
115 #include <netipsec/ipsec6.h>
118 #include <netinet/udp.h>
119 #include <netinet/udp_var.h>
120 #include <machine/in_cksum.h>
123 #include <security/mac/mac_framework.h>
125 #include "sack_filter.h"
126 #include "tcp_rack.h"
127 #include "rack_bbr_common.h"
129 uma_zone_t rack_zone;
130 uma_zone_t rack_pcb_zone;
133 #define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t)))
136 struct sysctl_ctx_list rack_sysctl_ctx;
137 struct sysctl_oid *rack_sysctl_root;
143 * The RACK module incorporates a number of
144 * TCP ideas that have been put out into the IETF
145 * over the last few years:
146 * - Matt Mathis's Rate Halving which slowly drops
147 * the congestion window so that the ack clock can
148 * be maintained during a recovery.
149 * - Yuchung Cheng's RACK TCP (for which it is named) that
150 * stops us from using the number of dup acks and instead
151 * uses time as the gauge of when we retransmit.
152 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
153 * of Dukkipati et al.
154 * RACK depends on SACK, so if an endpoint arrives that
155 * cannot do SACK the state machine below will shuttle the
156 * connection back to using the "default" TCP stack that is
159 * To implement RACK the original TCP stack was first decomposed
160 * into a functional state machine with individual states
161 * for each of the possible TCP connection states. The do_segment
162 * function's role in life is to mandate that the connection supports SACK
163 * initially and then assure that the RACK state matches the connection
164 * state before calling that state's do_segment function. Each
165 * state is simplified due to the fact that the original do_segment
166 * has been decomposed and we *know* what state we are in (no
167 * switches on the state) and all tests for SACK are gone. This
168 * greatly simplifies what each state does.
170 * TCP output is also over-written with a new version since it
171 * must maintain the new rack scoreboard.
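 *
 * As an illustration only (the dispatch below is a sketch, not the code
 * this file actually uses to store its sub-state handler), the
 * decomposition amounts to selecting one of the per-state handlers
 * declared later in this file once SACK support has been verified:
 *
 *	switch (tp->t_state) {
 *	case TCPS_ESTABLISHED:
 *		ret = rack_do_established(m, th, so, tp, &to,
 *		    drop_hdrlen, tlen, tiwin, thflags, nxt_pkt, iptos);
 *		break;
 *	case TCPS_FIN_WAIT_1:
 *		ret = rack_do_fin_wait_1(m, th, so, tp, &to,
 *		    drop_hdrlen, tlen, tiwin, thflags, nxt_pkt, iptos);
 *		break;
 *	...
 *	}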
174 static int32_t rack_tlp_thresh = 1;
175 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */
176 static int32_t rack_tlp_use_greater = 1;
177 static int32_t rack_reorder_thresh = 2;
178 static int32_t rack_reorder_fade = 60000; /* 0 - never fade, def 60,000
180 /* Attack threshold detections */
181 static uint32_t rack_highest_sack_thresh_seen = 0;
182 static uint32_t rack_highest_move_thresh_seen = 0;
184 static int32_t rack_pkt_delay = 1;
185 static int32_t rack_early_recovery = 1;
186 static int32_t rack_send_a_lot_in_prr = 1;
187 static int32_t rack_min_to = 1; /* Number of ms minimum timeout */
188 static int32_t rack_verbose_logging = 0;
189 static int32_t rack_ignore_data_after_close = 1;
190 static int32_t rack_enable_shared_cwnd = 0;
191 static int32_t rack_limits_scwnd = 1;
192 static int32_t rack_enable_mqueue_for_nonpaced = 0;
193 static int32_t rack_disable_prr = 0;
194 static int32_t use_rack_rr = 1;
195 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */
196 static int32_t rack_persist_min = 250; /* 250ms */
197 static int32_t rack_persist_max = 2000; /* 2 Second */
198 static int32_t rack_sack_not_required = 0; /* set to one to allow non-sack to use rack */
199 static int32_t rack_default_init_window = 0; /* Use system default */
200 static int32_t rack_limit_time_with_srtt = 0;
201 static int32_t rack_hw_pace_adjust = 0;
203 * Currently regular TCP has an rto_min of 30ms;
204 * the backoff goes 12 times, so that ends up
205 * being a total of 122.850 seconds before a
206 * connection is killed.
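 *
 * The arithmetic behind that figure: starting from a 30ms RTO and
 * doubling on each of the 12 backoffs gives a total wait of
 *
 *	30ms * (1 + 2 + 4 + ... + 2^11) = 30ms * (2^12 - 1)
 *	                                = 30ms * 4095
 *	                                = 122.850 seconds.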
208 static uint32_t rack_def_data_window = 20;
209 static uint32_t rack_goal_bdp = 2;
210 static uint32_t rack_min_srtts = 1;
211 static uint32_t rack_min_measure_usec = 0;
212 static int32_t rack_tlp_min = 10;
213 static int32_t rack_rto_min = 30; /* 30ms same as main freebsd */
214 static int32_t rack_rto_max = 4000; /* 4 seconds */
215 static const int32_t rack_free_cache = 2;
216 static int32_t rack_hptsi_segments = 40;
217 static int32_t rack_rate_sample_method = USE_RTT_LOW;
218 static int32_t rack_pace_every_seg = 0;
219 static int32_t rack_delayed_ack_time = 200; /* 200ms */
220 static int32_t rack_slot_reduction = 4;
221 static int32_t rack_wma_divisor = 8; /* For WMA calculation */
222 static int32_t rack_cwnd_block_ends_measure = 0;
223 static int32_t rack_rwnd_block_ends_measure = 0;
225 static int32_t rack_lower_cwnd_at_tlp = 0;
226 static int32_t rack_use_proportional_reduce = 0;
227 static int32_t rack_proportional_rate = 10;
228 static int32_t rack_tlp_max_resend = 2;
229 static int32_t rack_limited_retran = 0;
230 static int32_t rack_always_send_oldest = 0;
231 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;
233 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */
234 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */
235 static uint16_t rack_per_of_gp_rec = 200; /* 200 % of bw */
238 static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */
239 static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */
240 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */
241 static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */
242 static uint16_t rack_atexit_prtt = 130;	/* Clamp to 130% on exit prtt if non highly buffered path */
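/*
 * Worked example of how the goodput percentages above are applied,
 * assuming the defaults: with a measured goodput of 10 Mbit/s the pacing
 * rate becomes 25 Mbit/s in slow-start (250%), 20 Mbit/s in congestion
 * avoidance and recovery (200%), and 6 Mbit/s while in probe-rtt (60%),
 * with the CA/SS multipliers clamped to 130% at probe-rtt exit.
 */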
244 static uint32_t rack_max_drain_wait = 2;	/* How many gp srtt's before we give up draining */
245 static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */
246 static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */
247 static uint32_t rack_probertt_use_min_rtt_exit = 0;
248 static uint32_t rack_probe_rtt_sets_cwnd = 0;
249 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */
250 static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in us */
251 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top fraction */
252 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bottom fraction */
253 static uint32_t rack_min_probertt_hold = 200000; /* Equal to delayed ack time */
254 static uint32_t rack_probertt_filter_life = 10000000;
255 static uint32_t rack_probertt_lower_within = 10;
256 static uint32_t rack_min_rtt_movement = 250; /* Must move at least 250 useconds to count as a lowering */
257 static int32_t rack_pace_one_seg = 0;	/* Do we pace at 1 MSS instead of 2 for low b/w (less than ~1.4Meg) paths */
258 static int32_t rack_probertt_clear_is = 1;
259 static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */
260 static int32_t rack_hbp_thresh = 3;	/* what is the divisor max_rtt/min_rtt used to decide a hbp */
263 static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */
265 /* Timely information */
266 /* Combining these two gives the range of 'no change' to bw */
267 /* i.e. the up/down provide the upper and lower bound */
268 static int32_t rack_gp_per_bw_mul_up = 2; /* 2% */
269 static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */
270 static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */
271 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */
272 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */
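/*
 * Worked example with the defaults above: if the lowest RTT seen is 40ms,
 * an RTT at or below 40 + (40 * 1 / 4) = 50ms counts as "lower rtt" and
 * rtt_max is 3 * 40 = 120ms, while a b/w change within -4%/+2% of the
 * prior measurement is treated as "no change".
 */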
273 static int32_t rack_gp_decrease_per = 20;	/* 20% decrease in multiplier */
274 static int32_t rack_gp_increase_per = 2;	/* 2% increase in multiplier */
275 static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */
276 static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */
277 static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */
278 static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */
279 static int32_t rack_gp_no_rec_chg = 1;	/* Prohibit recovery from reducing its multiplier */
280 static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? */
281 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */
282 static int32_t rack_timely_max_push_drop = 3;	/* Three rounds of pushing */
283 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */
284 static int32_t rack_use_max_for_nobackoff = 0;
285 static int32_t rack_timely_int_timely_only = 0; /* do interim timely's only use the timely algo (no b/w changes)? */
286 static int32_t rack_timely_no_stopping = 0;
287 static int32_t rack_down_raise_thresh = 100;
288 static int32_t rack_req_segs = 1;
290 /* Weird delayed ack mode */
291 static int32_t rack_use_imac_dack = 0;
292 /* Rack specific counters */
293 counter_u64_t rack_badfr;
294 counter_u64_t rack_badfr_bytes;
295 counter_u64_t rack_rtm_prr_retran;
296 counter_u64_t rack_rtm_prr_newdata;
297 counter_u64_t rack_timestamp_mismatch;
298 counter_u64_t rack_reorder_seen;
299 counter_u64_t rack_paced_segments;
300 counter_u64_t rack_unpaced_segments;
301 counter_u64_t rack_calc_zero;
302 counter_u64_t rack_calc_nonzero;
303 counter_u64_t rack_saw_enobuf;
304 counter_u64_t rack_saw_enetunreach;
305 counter_u64_t rack_per_timer_hole;
307 /* Tail loss probe counters */
308 counter_u64_t rack_tlp_tot;
309 counter_u64_t rack_tlp_newdata;
310 counter_u64_t rack_tlp_retran;
311 counter_u64_t rack_tlp_retran_bytes;
312 counter_u64_t rack_tlp_retran_fail;
313 counter_u64_t rack_to_tot;
314 counter_u64_t rack_to_arm_rack;
315 counter_u64_t rack_to_arm_tlp;
316 counter_u64_t rack_to_alloc;
317 counter_u64_t rack_to_alloc_hard;
318 counter_u64_t rack_to_alloc_emerg;
319 counter_u64_t rack_to_alloc_limited;
320 counter_u64_t rack_alloc_limited_conns;
321 counter_u64_t rack_split_limited;
323 counter_u64_t rack_sack_proc_all;
324 counter_u64_t rack_sack_proc_short;
325 counter_u64_t rack_sack_proc_restart;
326 counter_u64_t rack_sack_attacks_detected;
327 counter_u64_t rack_sack_attacks_reversed;
328 counter_u64_t rack_sack_used_next_merge;
329 counter_u64_t rack_sack_splits;
330 counter_u64_t rack_sack_used_prev_merge;
331 counter_u64_t rack_sack_skipped_acked;
332 counter_u64_t rack_ack_total;
333 counter_u64_t rack_express_sack;
334 counter_u64_t rack_sack_total;
335 counter_u64_t rack_move_none;
336 counter_u64_t rack_move_some;
338 counter_u64_t rack_used_tlpmethod;
339 counter_u64_t rack_used_tlpmethod2;
340 counter_u64_t rack_enter_tlp_calc;
341 counter_u64_t rack_input_idle_reduces;
342 counter_u64_t rack_collapsed_win;
343 counter_u64_t rack_tlp_does_nada;
344 counter_u64_t rack_try_scwnd;
346 /* Temp CPU counters */
347 counter_u64_t rack_find_high;
349 counter_u64_t rack_progress_drops;
350 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
351 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];
354 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);
357 rack_process_ack(struct mbuf *m, struct tcphdr *th,
358 struct socket *so, struct tcpcb *tp, struct tcpopt *to,
359 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val);
361 rack_process_data(struct mbuf *m, struct tcphdr *th,
362 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
363 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
365 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
366 struct tcphdr *th, uint16_t nsegs, uint16_t type, int32_t recovery);
367 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
368 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
370 static struct rack_sendmap *
371 rack_check_recovery_mode(struct tcpcb *tp,
374 rack_cong_signal(struct tcpcb *tp, struct tcphdr *th,
376 static void rack_counter_destroy(void);
378 rack_ctloutput(struct socket *so, struct sockopt *sopt,
379 struct inpcb *inp, struct tcpcb *tp);
380 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
382 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line);
384 rack_do_segment(struct mbuf *m, struct tcphdr *th,
385 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
387 static void rack_dtor(void *mem, int32_t size, void *arg);
389 rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm,
390 uint32_t t, uint32_t cts);
392 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
393 uint32_t flex1, uint32_t flex2,
394 uint32_t flex3, uint32_t flex4,
395 uint32_t flex5, uint32_t flex6,
396 uint16_t flex7, uint8_t mod);
398 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
399 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, struct rack_sendmap *rsm);
400 static struct rack_sendmap *
401 rack_find_high_nonack(struct tcp_rack *rack,
402 struct rack_sendmap *rsm);
403 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
404 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
405 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
407 rack_get_sockopt(struct socket *so, struct sockopt *sopt,
408 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
410 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
411 tcp_seq th_ack, int line);
413 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
414 static int32_t rack_handoff_ok(struct tcpcb *tp);
415 static int32_t rack_init(struct tcpcb *tp);
416 static void rack_init_sysctls(void);
418 rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
421 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
422 uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts,
423 uint8_t pass, struct rack_sendmap *hintrsm, uint32_t us_cts);
425 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
426 struct rack_sendmap *rsm);
427 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
428 static int32_t rack_output(struct tcpcb *tp);
431 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
432 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
433 uint32_t cts, int *moved_two);
434 static void rack_post_recovery(struct tcpcb *tp, struct tcphdr *th);
435 static void rack_remxt_tmr(struct tcpcb *tp);
437 rack_set_sockopt(struct socket *so, struct sockopt *sopt,
438 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
439 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
440 static int32_t rack_stopall(struct tcpcb *tp);
442 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type,
444 static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type);
445 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
446 static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type);
448 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
449 struct rack_sendmap *rsm, uint32_t ts, int32_t * lenp);
451 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
452 struct rack_sendmap *rsm, uint32_t ts);
454 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
455 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
456 static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
458 rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
459 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
460 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
462 rack_do_closing(struct mbuf *m, struct tcphdr *th,
463 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
464 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
466 rack_do_established(struct mbuf *m, struct tcphdr *th,
467 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
468 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
470 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
471 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
472 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos);
474 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
475 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
476 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
478 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
479 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
480 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
482 rack_do_lastack(struct mbuf *m, struct tcphdr *th,
483 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
484 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
486 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
487 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
488 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
490 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
491 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
492 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
493 struct rack_sendmap *
494 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
496 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
497 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
499 tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th);
501 int32_t rack_clear_counter=0;
504 sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
509 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
510 if (error || req->newptr == NULL)
513 error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
518 printf("Clearing RACK counters\n");
520 counter_u64_zero(rack_badfr);
521 counter_u64_zero(rack_badfr_bytes);
522 counter_u64_zero(rack_rtm_prr_retran);
523 counter_u64_zero(rack_rtm_prr_newdata);
524 counter_u64_zero(rack_timestamp_mismatch);
525 counter_u64_zero(rack_reorder_seen);
526 counter_u64_zero(rack_tlp_tot);
527 counter_u64_zero(rack_tlp_newdata);
528 counter_u64_zero(rack_tlp_retran);
529 counter_u64_zero(rack_tlp_retran_bytes);
530 counter_u64_zero(rack_tlp_retran_fail);
531 counter_u64_zero(rack_to_tot);
532 counter_u64_zero(rack_to_arm_rack);
533 counter_u64_zero(rack_to_arm_tlp);
534 counter_u64_zero(rack_paced_segments);
535 counter_u64_zero(rack_calc_zero);
536 counter_u64_zero(rack_calc_nonzero);
537 counter_u64_zero(rack_unpaced_segments);
538 counter_u64_zero(rack_saw_enobuf);
539 counter_u64_zero(rack_saw_enetunreach);
540 counter_u64_zero(rack_per_timer_hole);
541 counter_u64_zero(rack_to_alloc_hard);
542 counter_u64_zero(rack_to_alloc_emerg);
543 counter_u64_zero(rack_sack_proc_all);
544 counter_u64_zero(rack_sack_proc_short);
545 counter_u64_zero(rack_sack_proc_restart);
546 counter_u64_zero(rack_to_alloc);
547 counter_u64_zero(rack_to_alloc_limited);
548 counter_u64_zero(rack_alloc_limited_conns);
549 counter_u64_zero(rack_split_limited);
550 counter_u64_zero(rack_find_high);
551 counter_u64_zero(rack_sack_attacks_detected);
552 counter_u64_zero(rack_sack_attacks_reversed);
553 counter_u64_zero(rack_sack_used_next_merge);
554 counter_u64_zero(rack_sack_used_prev_merge);
555 counter_u64_zero(rack_sack_splits);
556 counter_u64_zero(rack_sack_skipped_acked);
557 counter_u64_zero(rack_ack_total);
558 counter_u64_zero(rack_express_sack);
559 counter_u64_zero(rack_sack_total);
560 counter_u64_zero(rack_move_none);
561 counter_u64_zero(rack_move_some);
562 counter_u64_zero(rack_used_tlpmethod);
563 counter_u64_zero(rack_used_tlpmethod2);
564 counter_u64_zero(rack_enter_tlp_calc);
565 counter_u64_zero(rack_progress_drops);
566 counter_u64_zero(rack_tlp_does_nada);
567 counter_u64_zero(rack_try_scwnd);
568 counter_u64_zero(rack_collapsed_win);
570 rack_clear_counter = 0;
575 rack_init_sysctls(void)
577 struct sysctl_oid *rack_counters;
578 struct sysctl_oid *rack_attack;
579 struct sysctl_oid *rack_pacing;
580 struct sysctl_oid *rack_timely;
581 struct sysctl_oid *rack_timers;
582 struct sysctl_oid *rack_tlp;
583 struct sysctl_oid *rack_misc;
584 struct sysctl_oid *rack_measure;
585 struct sysctl_oid *rack_probertt;
587 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
588 SYSCTL_CHILDREN(rack_sysctl_root),
591 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
592 "Rack Sack Attack Counters and Controls");
593 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
594 SYSCTL_CHILDREN(rack_sysctl_root),
597 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
599 SYSCTL_ADD_S32(&rack_sysctl_ctx,
600 SYSCTL_CHILDREN(rack_sysctl_root),
601 OID_AUTO, "rate_sample_method", CTLFLAG_RW,
602 &rack_rate_sample_method , USE_RTT_LOW,
603 "What method should we use for rate sampling 0=high, 1=low ");
604 /* Probe rtt related controls */
605 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
606 SYSCTL_CHILDREN(rack_sysctl_root),
609 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
610 "ProbeRTT related Controls");
611 SYSCTL_ADD_U16(&rack_sysctl_ctx,
612 SYSCTL_CHILDREN(rack_probertt),
613 OID_AUTO, "exit_per_hpb", CTLFLAG_RW,
614 &rack_atexit_prtt_hbp, 130,
615 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%");
616 SYSCTL_ADD_U16(&rack_sysctl_ctx,
617 SYSCTL_CHILDREN(rack_probertt),
618 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW,
619 &rack_atexit_prtt, 130,
620 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%");
621 SYSCTL_ADD_U16(&rack_sysctl_ctx,
622 SYSCTL_CHILDREN(rack_probertt),
623 OID_AUTO, "gp_per_mul", CTLFLAG_RW,
624 &rack_per_of_gp_probertt, 60,
625 "What percentage of goodput do we pace at in probertt");
626 SYSCTL_ADD_U16(&rack_sysctl_ctx,
627 SYSCTL_CHILDREN(rack_probertt),
628 OID_AUTO, "gp_per_reduce", CTLFLAG_RW,
629 &rack_per_of_gp_probertt_reduce, 10,
630 "What percentage of goodput do we reduce every gp_srtt");
631 SYSCTL_ADD_U16(&rack_sysctl_ctx,
632 SYSCTL_CHILDREN(rack_probertt),
633 OID_AUTO, "gp_per_low", CTLFLAG_RW,
634 &rack_per_of_gp_lowthresh, 40,
635 "What percentage of goodput do we allow the multiplier to fall to");
636 SYSCTL_ADD_U32(&rack_sysctl_ctx,
637 SYSCTL_CHILDREN(rack_probertt),
638 OID_AUTO, "time_between", CTLFLAG_RW,
639 & rack_time_between_probertt, 96000000,
640 "How many useconds between the lowest rtt falling must past before we enter probertt");
641 SYSCTL_ADD_U32(&rack_sysctl_ctx,
642 SYSCTL_CHILDREN(rack_probertt),
643 OID_AUTO, "safety", CTLFLAG_RW,
644 &rack_probe_rtt_safety_val, 2000000,
645 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)");
646 SYSCTL_ADD_U32(&rack_sysctl_ctx,
647 SYSCTL_CHILDREN(rack_probertt),
648 OID_AUTO, "sets_cwnd", CTLFLAG_RW,
649 &rack_probe_rtt_sets_cwnd, 0,
650 "Do we set the cwnd too (if always_lower is on)");
651 SYSCTL_ADD_U32(&rack_sysctl_ctx,
652 SYSCTL_CHILDREN(rack_probertt),
653 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW,
654 &rack_max_drain_wait, 2,
655 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal");
656 SYSCTL_ADD_U32(&rack_sysctl_ctx,
657 SYSCTL_CHILDREN(rack_probertt),
658 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW,
660 "We must drain this many gp_srtt's waiting for flight to reach goal");
661 SYSCTL_ADD_U32(&rack_sysctl_ctx,
662 SYSCTL_CHILDREN(rack_probertt),
663 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW,
664 &rack_probertt_use_min_rtt_entry, 1,
665 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry");
666 SYSCTL_ADD_U32(&rack_sysctl_ctx,
667 SYSCTL_CHILDREN(rack_probertt),
668 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW,
669 &rack_probertt_use_min_rtt_exit, 0,
670 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt");
671 SYSCTL_ADD_U32(&rack_sysctl_ctx,
672 SYSCTL_CHILDREN(rack_probertt),
673 OID_AUTO, "length_div", CTLFLAG_RW,
674 &rack_probertt_gpsrtt_cnt_div, 0,
675 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)");
676 SYSCTL_ADD_U32(&rack_sysctl_ctx,
677 SYSCTL_CHILDREN(rack_probertt),
678 OID_AUTO, "length_mul", CTLFLAG_RW,
679 &rack_probertt_gpsrtt_cnt_mul, 0,
680 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)");
681 SYSCTL_ADD_U32(&rack_sysctl_ctx,
682 SYSCTL_CHILDREN(rack_probertt),
683 OID_AUTO, "holdtim_at_target", CTLFLAG_RW,
684 &rack_min_probertt_hold, 200000,
685 "What is the minimum time we hold probertt at target");
686 SYSCTL_ADD_U32(&rack_sysctl_ctx,
687 SYSCTL_CHILDREN(rack_probertt),
688 OID_AUTO, "filter_life", CTLFLAG_RW,
689 &rack_probertt_filter_life, 10000000,
690 "What is the time for the filters life in useconds");
691 SYSCTL_ADD_U32(&rack_sysctl_ctx,
692 SYSCTL_CHILDREN(rack_probertt),
693 OID_AUTO, "lower_within", CTLFLAG_RW,
694 &rack_probertt_lower_within, 10,
695 "If the rtt goes lower within this percentage of the time, go into probe-rtt");
696 SYSCTL_ADD_U32(&rack_sysctl_ctx,
697 SYSCTL_CHILDREN(rack_probertt),
698 OID_AUTO, "must_move", CTLFLAG_RW,
699 &rack_min_rtt_movement, 250,
700 "How much is the minimum movement in rtt to count as a drop for probertt purposes");
701 SYSCTL_ADD_U32(&rack_sysctl_ctx,
702 SYSCTL_CHILDREN(rack_probertt),
703 OID_AUTO, "clear_is_cnts", CTLFLAG_RW,
704 &rack_probertt_clear_is, 1,
705 "Do we clear I/S counts on exiting probe-rtt");
706 SYSCTL_ADD_S32(&rack_sysctl_ctx,
707 SYSCTL_CHILDREN(rack_probertt),
708 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW,
709 &rack_max_drain_hbp, 1,
710 "How many extra drain gpsrtt's do we get in highly buffered paths");
711 SYSCTL_ADD_S32(&rack_sysctl_ctx,
712 SYSCTL_CHILDREN(rack_probertt),
713 OID_AUTO, "hbp_threshold", CTLFLAG_RW,
715 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold");
716 /* Pacing related sysctls */
717 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
718 SYSCTL_CHILDREN(rack_sysctl_root),
721 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
722 "Pacing related Controls");
723 SYSCTL_ADD_S32(&rack_sysctl_ctx,
724 SYSCTL_CHILDREN(rack_pacing),
725 OID_AUTO, "max_pace_over", CTLFLAG_RW,
726 &rack_max_per_above, 30,
727 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)");
728 SYSCTL_ADD_S32(&rack_sysctl_ctx,
729 SYSCTL_CHILDREN(rack_pacing),
730 OID_AUTO, "pace_to_one", CTLFLAG_RW,
731 &rack_pace_one_seg, 0,
732 "Do we allow low b/w pacing of 1MSS instead of two");
733 SYSCTL_ADD_S32(&rack_sysctl_ctx,
734 SYSCTL_CHILDREN(rack_pacing),
735 OID_AUTO, "limit_wsrtt", CTLFLAG_RW,
736 &rack_limit_time_with_srtt, 0,
737 "Do we limit pacing time based on srtt");
738 SYSCTL_ADD_S32(&rack_sysctl_ctx,
739 SYSCTL_CHILDREN(rack_pacing),
740 OID_AUTO, "init_win", CTLFLAG_RW,
741 &rack_default_init_window, 0,
742 "Do we have a rack initial window 0 = system default");
743 SYSCTL_ADD_U32(&rack_sysctl_ctx,
744 SYSCTL_CHILDREN(rack_pacing),
745 OID_AUTO, "hw_pacing_adjust", CTLFLAG_RW,
746 &rack_hw_pace_adjust, 0,
747 "What percentage do we raise the MSS by (11 = 1.1%)");
748 SYSCTL_ADD_U16(&rack_sysctl_ctx,
749 SYSCTL_CHILDREN(rack_pacing),
750 OID_AUTO, "gp_per_ss", CTLFLAG_RW,
751 &rack_per_of_gp_ss, 250,
752 "If non zero, what percentage of goodput to pace at in slow start");
753 SYSCTL_ADD_U16(&rack_sysctl_ctx,
754 SYSCTL_CHILDREN(rack_pacing),
755 OID_AUTO, "gp_per_ca", CTLFLAG_RW,
756 &rack_per_of_gp_ca, 150,
757 "If non zero, what percentage of goodput to pace at in congestion avoidance");
758 SYSCTL_ADD_U16(&rack_sysctl_ctx,
759 SYSCTL_CHILDREN(rack_pacing),
760 OID_AUTO, "gp_per_rec", CTLFLAG_RW,
761 &rack_per_of_gp_rec, 200,
762 "If non zero, what percentage of goodput to pace at in recovery");
763 SYSCTL_ADD_S32(&rack_sysctl_ctx,
764 SYSCTL_CHILDREN(rack_pacing),
765 OID_AUTO, "pace_max_seg", CTLFLAG_RW,
766 &rack_hptsi_segments, 40,
767 "What size is the max for TSO segments in pacing and burst mitigation");
768 SYSCTL_ADD_S32(&rack_sysctl_ctx,
769 SYSCTL_CHILDREN(rack_pacing),
770 OID_AUTO, "burst_reduces", CTLFLAG_RW,
771 &rack_slot_reduction, 4,
772 "When doing only burst mitigation what is the reduce divisor");
773 SYSCTL_ADD_S32(&rack_sysctl_ctx,
774 SYSCTL_CHILDREN(rack_sysctl_root),
775 OID_AUTO, "use_pacing", CTLFLAG_RW,
776 &rack_pace_every_seg, 0,
777 "If set we use pacing, if clear we use only the original burst mitigation");
779 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
780 SYSCTL_CHILDREN(rack_sysctl_root),
783 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
784 "Rack Timely RTT Controls");
785 /* Timely based GP dynamics */
786 SYSCTL_ADD_S32(&rack_sysctl_ctx,
787 SYSCTL_CHILDREN(rack_timely),
788 OID_AUTO, "upper", CTLFLAG_RW,
789 &rack_gp_per_bw_mul_up, 2,
790 "Rack timely upper range for equal b/w (in percentage)");
791 SYSCTL_ADD_S32(&rack_sysctl_ctx,
792 SYSCTL_CHILDREN(rack_timely),
793 OID_AUTO, "lower", CTLFLAG_RW,
794 &rack_gp_per_bw_mul_down, 4,
795 "Rack timely lower range for equal b/w (in percentage)");
796 SYSCTL_ADD_S32(&rack_sysctl_ctx,
797 SYSCTL_CHILDREN(rack_timely),
798 OID_AUTO, "rtt_max_mul", CTLFLAG_RW,
799 &rack_gp_rtt_maxmul, 3,
800 "Rack timely multipler of lowest rtt for rtt_max");
801 SYSCTL_ADD_S32(&rack_sysctl_ctx,
802 SYSCTL_CHILDREN(rack_timely),
803 OID_AUTO, "rtt_min_div", CTLFLAG_RW,
804 &rack_gp_rtt_mindiv, 4,
805 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt");
806 SYSCTL_ADD_S32(&rack_sysctl_ctx,
807 SYSCTL_CHILDREN(rack_timely),
808 OID_AUTO, "rtt_min_mul", CTLFLAG_RW,
809 &rack_gp_rtt_minmul, 1,
810 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
811 SYSCTL_ADD_S32(&rack_sysctl_ctx,
812 SYSCTL_CHILDREN(rack_timely),
813 OID_AUTO, "decrease", CTLFLAG_RW,
814 &rack_gp_decrease_per, 20,
815 "Rack timely decrease percentage of our GP multiplication factor");
816 SYSCTL_ADD_S32(&rack_sysctl_ctx,
817 SYSCTL_CHILDREN(rack_timely),
818 OID_AUTO, "increase", CTLFLAG_RW,
819 &rack_gp_increase_per, 2,
820 "Rack timely increase perentage of our GP multiplication factor");
821 SYSCTL_ADD_S32(&rack_sysctl_ctx,
822 SYSCTL_CHILDREN(rack_timely),
823 OID_AUTO, "lowerbound", CTLFLAG_RW,
824 &rack_per_lower_bound, 50,
825 "Rack timely lowest percentage we allow GP multiplier to fall to");
826 SYSCTL_ADD_S32(&rack_sysctl_ctx,
827 SYSCTL_CHILDREN(rack_timely),
828 OID_AUTO, "upperboundss", CTLFLAG_RW,
829 &rack_per_upper_bound_ss, 0,
830 "Rack timely higest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)");
831 SYSCTL_ADD_S32(&rack_sysctl_ctx,
832 SYSCTL_CHILDREN(rack_timely),
833 OID_AUTO, "upperboundca", CTLFLAG_RW,
834 &rack_per_upper_bound_ca, 0,
835 "Rack timely higest percentage we allow GP multiplier to CA raise to (0 is no upperbound)");
836 SYSCTL_ADD_S32(&rack_sysctl_ctx,
837 SYSCTL_CHILDREN(rack_timely),
838 OID_AUTO, "dynamicgp", CTLFLAG_RW,
840 "Rack timely do we enable dynmaic timely goodput by default");
841 SYSCTL_ADD_S32(&rack_sysctl_ctx,
842 SYSCTL_CHILDREN(rack_timely),
843 OID_AUTO, "no_rec_red", CTLFLAG_RW,
844 &rack_gp_no_rec_chg, 1,
845 "Rack timely do we prohibit the recovery multiplier from being lowered");
846 SYSCTL_ADD_S32(&rack_sysctl_ctx,
847 SYSCTL_CHILDREN(rack_timely),
848 OID_AUTO, "red_clear_cnt", CTLFLAG_RW,
849 &rack_timely_dec_clear, 6,
850 "Rack timely what threshold do we count to before another boost during b/w decent");
851 SYSCTL_ADD_S32(&rack_sysctl_ctx,
852 SYSCTL_CHILDREN(rack_timely),
853 OID_AUTO, "max_push_rise", CTLFLAG_RW,
854 &rack_timely_max_push_rise, 3,
855 "Rack timely how many times do we push up with b/w increase");
856 SYSCTL_ADD_S32(&rack_sysctl_ctx,
857 SYSCTL_CHILDREN(rack_timely),
858 OID_AUTO, "max_push_drop", CTLFLAG_RW,
859 &rack_timely_max_push_drop, 3,
860 "Rack timely how many times do we push back on b/w decent");
861 SYSCTL_ADD_S32(&rack_sysctl_ctx,
862 SYSCTL_CHILDREN(rack_timely),
863 OID_AUTO, "min_segs", CTLFLAG_RW,
864 &rack_timely_min_segs, 4,
865 "Rack timely when setting the cwnd what is the min num segments");
866 SYSCTL_ADD_S32(&rack_sysctl_ctx,
867 SYSCTL_CHILDREN(rack_timely),
868 OID_AUTO, "noback_max", CTLFLAG_RW,
869 &rack_use_max_for_nobackoff, 0,
870 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min");
871 SYSCTL_ADD_S32(&rack_sysctl_ctx,
872 SYSCTL_CHILDREN(rack_timely),
873 OID_AUTO, "interim_timely_only", CTLFLAG_RW,
874 &rack_timely_int_timely_only, 0,
875 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)");
876 SYSCTL_ADD_S32(&rack_sysctl_ctx,
877 SYSCTL_CHILDREN(rack_timely),
878 OID_AUTO, "nonstop", CTLFLAG_RW,
879 &rack_timely_no_stopping, 0,
880 "Rack timely don't stop increase");
881 SYSCTL_ADD_S32(&rack_sysctl_ctx,
882 SYSCTL_CHILDREN(rack_timely),
883 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW,
884 &rack_down_raise_thresh, 100,
885 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)");
886 SYSCTL_ADD_S32(&rack_sysctl_ctx,
887 SYSCTL_CHILDREN(rack_timely),
888 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW,
890 "Bottom dragging if not these many segments outstanding and room");
892 /* TLP and Rack related parameters */
893 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
894 SYSCTL_CHILDREN(rack_sysctl_root),
897 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
898 "TLP and Rack related Controls");
899 SYSCTL_ADD_S32(&rack_sysctl_ctx,
900 SYSCTL_CHILDREN(rack_tlp),
901 OID_AUTO, "use_rrr", CTLFLAG_RW,
903 "Do we use Rack Rapid Recovery");
904 SYSCTL_ADD_S32(&rack_sysctl_ctx,
905 SYSCTL_CHILDREN(rack_tlp),
906 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW,
907 &rack_non_rxt_use_cr, 0,
908 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk");
909 SYSCTL_ADD_S32(&rack_sysctl_ctx,
910 SYSCTL_CHILDREN(rack_tlp),
911 OID_AUTO, "tlpmethod", CTLFLAG_RW,
912 &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
913 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
914 SYSCTL_ADD_S32(&rack_sysctl_ctx,
915 SYSCTL_CHILDREN(rack_tlp),
916 OID_AUTO, "limit", CTLFLAG_RW,
918 "How many TLP's can be sent without sending new data");
919 SYSCTL_ADD_S32(&rack_sysctl_ctx,
920 SYSCTL_CHILDREN(rack_tlp),
921 OID_AUTO, "use_greater", CTLFLAG_RW,
922 &rack_tlp_use_greater, 1,
923 "Should we use the rack_rtt time if its greater than srtt");
924 SYSCTL_ADD_S32(&rack_sysctl_ctx,
925 SYSCTL_CHILDREN(rack_tlp),
926 OID_AUTO, "tlpminto", CTLFLAG_RW,
928 "TLP minimum timeout per the specification (10ms)");
929 SYSCTL_ADD_S32(&rack_sysctl_ctx,
930 SYSCTL_CHILDREN(rack_tlp),
931 OID_AUTO, "send_oldest", CTLFLAG_RW,
932 &rack_always_send_oldest, 0,
933 "Should we always send the oldest TLP and RACK-TLP");
934 SYSCTL_ADD_S32(&rack_sysctl_ctx,
935 SYSCTL_CHILDREN(rack_tlp),
936 OID_AUTO, "rack_tlimit", CTLFLAG_RW,
937 &rack_limited_retran, 0,
938 "How many times can a rack timeout drive out sends");
939 SYSCTL_ADD_S32(&rack_sysctl_ctx,
940 SYSCTL_CHILDREN(rack_tlp),
941 OID_AUTO, "tlp_retry", CTLFLAG_RW,
942 &rack_tlp_max_resend, 2,
943 "How many times does TLP retry a single segment or multiple with no ACK");
944 SYSCTL_ADD_S32(&rack_sysctl_ctx,
945 SYSCTL_CHILDREN(rack_tlp),
946 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
947 &rack_lower_cwnd_at_tlp, 0,
948 "When a TLP completes a retran should we enter recovery");
949 SYSCTL_ADD_S32(&rack_sysctl_ctx,
950 SYSCTL_CHILDREN(rack_tlp),
951 OID_AUTO, "reorder_thresh", CTLFLAG_RW,
952 &rack_reorder_thresh, 2,
953 "What factor for rack will be added when seeing reordering (shift right)");
954 SYSCTL_ADD_S32(&rack_sysctl_ctx,
955 SYSCTL_CHILDREN(rack_tlp),
956 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
958 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
959 SYSCTL_ADD_S32(&rack_sysctl_ctx,
960 SYSCTL_CHILDREN(rack_tlp),
961 OID_AUTO, "reorder_fade", CTLFLAG_RW,
962 &rack_reorder_fade, 0,
963 "Does reorder detection fade, if so how many ms (0 means never)");
964 SYSCTL_ADD_S32(&rack_sysctl_ctx,
965 SYSCTL_CHILDREN(rack_tlp),
966 OID_AUTO, "pktdelay", CTLFLAG_RW,
968 "Extra RACK time (in ms) besides reordering thresh");
970 /* Timer related controls */
971 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
972 SYSCTL_CHILDREN(rack_sysctl_root),
975 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
976 "Timer related controls");
977 SYSCTL_ADD_U32(&rack_sysctl_ctx,
978 SYSCTL_CHILDREN(rack_timers),
979 OID_AUTO, "persmin", CTLFLAG_RW,
980 &rack_persist_min, 250,
981 "What is the minimum time in milliseconds between persists");
982 SYSCTL_ADD_U32(&rack_sysctl_ctx,
983 SYSCTL_CHILDREN(rack_timers),
984 OID_AUTO, "persmax", CTLFLAG_RW,
985 &rack_persist_max, 2000,
986 "What is the largest delay in milliseconds between persists");
987 SYSCTL_ADD_S32(&rack_sysctl_ctx,
988 SYSCTL_CHILDREN(rack_timers),
989 OID_AUTO, "delayed_ack", CTLFLAG_RW,
990 &rack_delayed_ack_time, 200,
991 "Delayed ack time (200ms)");
992 SYSCTL_ADD_S32(&rack_sysctl_ctx,
993 SYSCTL_CHILDREN(rack_timers),
994 OID_AUTO, "minrto", CTLFLAG_RW,
996 "Minimum RTO in ms -- set with caution below 1000 due to TLP");
997 SYSCTL_ADD_S32(&rack_sysctl_ctx,
998 SYSCTL_CHILDREN(rack_timers),
999 OID_AUTO, "maxrto", CTLFLAG_RW,
1001 "Maxiumum RTO in ms -- should be at least as large as min_rto");
1002 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1003 SYSCTL_CHILDREN(rack_timers),
1004 OID_AUTO, "minto", CTLFLAG_RW,
1006 "Minimum rack timeout in milliseconds");
1007 /* Measure controls */
1008 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1009 SYSCTL_CHILDREN(rack_sysctl_root),
1012 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1013 "Measure related controls");
1014 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1015 SYSCTL_CHILDREN(rack_measure),
1016 OID_AUTO, "wma_divisor", CTLFLAG_RW,
1017 &rack_wma_divisor, 8,
1018 "When doing b/w calculation what is the divisor for the WMA");
1019 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1020 SYSCTL_CHILDREN(rack_measure),
1021 OID_AUTO, "end_cwnd", CTLFLAG_RW,
1022 &rack_cwnd_block_ends_measure, 0,
1023 "Does a cwnd just-return end the measurement window (app limited)");
1024 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1025 SYSCTL_CHILDREN(rack_measure),
1026 OID_AUTO, "end_rwnd", CTLFLAG_RW,
1027 &rack_rwnd_block_ends_measure, 0,
1028 "Does an rwnd just-return end the measurement window (app limited -- not persists)");
1029 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1030 SYSCTL_CHILDREN(rack_measure),
1031 OID_AUTO, "min_target", CTLFLAG_RW,
1032 &rack_def_data_window, 20,
1033 "What is the minimum target window (in mss) for a GP measurements");
1034 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1035 SYSCTL_CHILDREN(rack_measure),
1036 OID_AUTO, "goal_bdp", CTLFLAG_RW,
1038 "What is the goal BDP to measure");
1039 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1040 SYSCTL_CHILDREN(rack_measure),
1041 OID_AUTO, "min_srtts", CTLFLAG_RW,
1043 "What is the goal BDP to measure");
1044 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1045 SYSCTL_CHILDREN(rack_measure),
1046 OID_AUTO, "min_measure_tim", CTLFLAG_RW,
1047 &rack_min_measure_usec, 0,
1048 "What is the Minimum time time for a measurement if 0, this is off");
1049 /* Misc rack controls */
1050 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1051 SYSCTL_CHILDREN(rack_sysctl_root),
1054 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1055 "Misc related controls");
1056 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1057 SYSCTL_CHILDREN(rack_misc),
1058 OID_AUTO, "shared_cwnd", CTLFLAG_RW,
1059 &rack_enable_shared_cwnd, 0,
1060 "Should RACK try to use the shared cwnd on connections where allowed");
1061 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1062 SYSCTL_CHILDREN(rack_misc),
1063 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW,
1064 &rack_limits_scwnd, 1,
1065 "Should RACK place low end time limits on the shared cwnd feature");
1066 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1067 SYSCTL_CHILDREN(rack_misc),
1068 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW,
1069 &rack_enable_mqueue_for_nonpaced, 0,
1070 "Should RACK use mbuf queuing for non-paced connections");
1071 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1072 SYSCTL_CHILDREN(rack_misc),
1073 OID_AUTO, "iMac_dack", CTLFLAG_RW,
1074 &rack_use_imac_dack, 0,
1075 "Should RACK try to emulate iMac delayed ack");
1076 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1077 SYSCTL_CHILDREN(rack_misc),
1078 OID_AUTO, "no_prr", CTLFLAG_RW,
1079 &rack_disable_prr, 0,
1080 "Should RACK not use prr and only pace (must have pacing on)");
1081 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1082 SYSCTL_CHILDREN(rack_misc),
1083 OID_AUTO, "bb_verbose", CTLFLAG_RW,
1084 &rack_verbose_logging, 0,
1085 "Should RACK black box logging be verbose");
1086 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1087 SYSCTL_CHILDREN(rack_misc),
1088 OID_AUTO, "data_after_close", CTLFLAG_RW,
1089 &rack_ignore_data_after_close, 1,
1090 "Do we hold off sending a RST until all pending data is ack'd");
1091 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1092 SYSCTL_CHILDREN(rack_misc),
1093 OID_AUTO, "no_sack_needed", CTLFLAG_RW,
1094 &rack_sack_not_required, 0,
1095 "Do we allow rack to run on connections not supporting SACK");
1096 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1097 SYSCTL_CHILDREN(rack_misc),
1098 OID_AUTO, "recovery_loss_prop", CTLFLAG_RW,
1099 &rack_use_proportional_reduce, 0,
1100 "Should we proportionaly reduce cwnd based on the number of losses ");
1101 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1102 SYSCTL_CHILDREN(rack_misc),
1103 OID_AUTO, "recovery_prop", CTLFLAG_RW,
1104 &rack_proportional_rate, 10,
1105 "What percent reduction per loss");
1106 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1107 SYSCTL_CHILDREN(rack_misc),
1108 OID_AUTO, "prr_sendalot", CTLFLAG_RW,
1109 &rack_send_a_lot_in_prr, 1,
1110 "Send a lot in prr");
1111 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1112 SYSCTL_CHILDREN(rack_misc),
1113 OID_AUTO, "earlyrecovery", CTLFLAG_RW,
1114 &rack_early_recovery, 1,
1115 "Do we do early recovery with rack");
1116 /* Sack Attacker detection stuff */
1117 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1118 SYSCTL_CHILDREN(rack_attack),
1119 OID_AUTO, "detect_highsackratio", CTLFLAG_RW,
1120 &rack_highest_sack_thresh_seen, 0,
1121 "Highest sack to ack ratio seen");
1122 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1123 SYSCTL_CHILDREN(rack_attack),
1124 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW,
1125 &rack_highest_move_thresh_seen, 0,
1126 "Highest move to non-move ratio seen");
1127 rack_ack_total = counter_u64_alloc(M_WAITOK);
1128 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1129 SYSCTL_CHILDREN(rack_attack),
1130 OID_AUTO, "acktotal", CTLFLAG_RD,
1132 "Total number of Ack's");
1133 rack_express_sack = counter_u64_alloc(M_WAITOK);
1134 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1135 SYSCTL_CHILDREN(rack_attack),
1136 OID_AUTO, "exp_sacktotal", CTLFLAG_RD,
1138 "Total expresss number of Sack's");
1139 rack_sack_total = counter_u64_alloc(M_WAITOK);
1140 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1141 SYSCTL_CHILDREN(rack_attack),
1142 OID_AUTO, "sacktotal", CTLFLAG_RD,
1144 "Total number of SACKs");
1145 rack_move_none = counter_u64_alloc(M_WAITOK);
1146 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1147 SYSCTL_CHILDREN(rack_attack),
1148 OID_AUTO, "move_none", CTLFLAG_RD,
1150 "Total number of SACK index reuse of postions under threshold");
1151 rack_move_some = counter_u64_alloc(M_WAITOK);
1152 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1153 SYSCTL_CHILDREN(rack_attack),
1154 OID_AUTO, "move_some", CTLFLAG_RD,
1156 "Total number of SACK index reuse of postions over threshold");
1157 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK);
1158 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1159 SYSCTL_CHILDREN(rack_attack),
1160 OID_AUTO, "attacks", CTLFLAG_RD,
1161 &rack_sack_attacks_detected,
1162 "Total number of SACK attackers that had sack disabled");
1163 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK);
1164 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1165 SYSCTL_CHILDREN(rack_attack),
1166 OID_AUTO, "reversed", CTLFLAG_RD,
1167 &rack_sack_attacks_reversed,
1168 "Total number of SACK attackers that were later determined false positive");
1169 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK);
1170 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1171 SYSCTL_CHILDREN(rack_attack),
1172 OID_AUTO, "nextmerge", CTLFLAG_RD,
1173 &rack_sack_used_next_merge,
1174 "Total number of times we used the next merge");
1175 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK);
1176 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1177 SYSCTL_CHILDREN(rack_attack),
1178 OID_AUTO, "prevmerge", CTLFLAG_RD,
1179 &rack_sack_used_prev_merge,
1180 "Total number of times we used the prev merge");
1182 rack_badfr = counter_u64_alloc(M_WAITOK);
1183 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1184 SYSCTL_CHILDREN(rack_counters),
1185 OID_AUTO, "badfr", CTLFLAG_RD,
1186 &rack_badfr, "Total number of bad FRs");
1187 rack_badfr_bytes = counter_u64_alloc(M_WAITOK);
1188 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1189 SYSCTL_CHILDREN(rack_counters),
1190 OID_AUTO, "badfr_bytes", CTLFLAG_RD,
1191 &rack_badfr_bytes, "Total number of bad FRs");
1192 rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK);
1193 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1194 SYSCTL_CHILDREN(rack_counters),
1195 OID_AUTO, "prrsndret", CTLFLAG_RD,
1196 &rack_rtm_prr_retran,
1197 "Total number of prr based retransmits");
1198 rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK);
1199 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1200 SYSCTL_CHILDREN(rack_counters),
1201 OID_AUTO, "prrsndnew", CTLFLAG_RD,
1202 &rack_rtm_prr_newdata,
1203 "Total number of prr based new transmits");
1204 rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK);
1205 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1206 SYSCTL_CHILDREN(rack_counters),
1207 OID_AUTO, "tsnf", CTLFLAG_RD,
1208 &rack_timestamp_mismatch,
1209 "Total number of timestamps that we could not find the reported ts");
1210 rack_find_high = counter_u64_alloc(M_WAITOK);
1211 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1212 SYSCTL_CHILDREN(rack_counters),
1213 OID_AUTO, "findhigh", CTLFLAG_RD,
1215 "Total number of FIN causing find-high");
1216 rack_reorder_seen = counter_u64_alloc(M_WAITOK);
1217 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1218 SYSCTL_CHILDREN(rack_counters),
1219 OID_AUTO, "reordering", CTLFLAG_RD,
1221 "Total number of times we added delay due to reordering");
1222 rack_tlp_tot = counter_u64_alloc(M_WAITOK);
1223 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1224 SYSCTL_CHILDREN(rack_counters),
1225 OID_AUTO, "tlp_to_total", CTLFLAG_RD,
1227 "Total number of tail loss probe expirations");
1228 rack_tlp_newdata = counter_u64_alloc(M_WAITOK);
1229 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1230 SYSCTL_CHILDREN(rack_counters),
1231 OID_AUTO, "tlp_new", CTLFLAG_RD,
1233 "Total number of tail loss probe sending new data");
1234 rack_tlp_retran = counter_u64_alloc(M_WAITOK);
1235 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1236 SYSCTL_CHILDREN(rack_counters),
1237 OID_AUTO, "tlp_retran", CTLFLAG_RD,
1239 "Total number of tail loss probe sending retransmitted data");
1240 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK);
1241 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1242 SYSCTL_CHILDREN(rack_counters),
1243 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD,
1244 &rack_tlp_retran_bytes,
1245 "Total bytes of tail loss probe sending retransmitted data");
1246 rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK);
1247 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1248 SYSCTL_CHILDREN(rack_counters),
1249 OID_AUTO, "tlp_retran_fail", CTLFLAG_RD,
1250 &rack_tlp_retran_fail,
1251 "Total number of tail loss probe sending retransmitted data that failed (wait for t3)");
1252 rack_to_tot = counter_u64_alloc(M_WAITOK);
1253 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1254 SYSCTL_CHILDREN(rack_counters),
1255 OID_AUTO, "rack_to_tot", CTLFLAG_RD,
1257 "Total number of times the rack to expired");
1258 rack_to_arm_rack = counter_u64_alloc(M_WAITOK);
1259 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1260 SYSCTL_CHILDREN(rack_counters),
1261 OID_AUTO, "arm_rack", CTLFLAG_RD,
1263 "Total number of times the rack timer armed");
1264 rack_to_arm_tlp = counter_u64_alloc(M_WAITOK);
1265 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1266 SYSCTL_CHILDREN(rack_counters),
1267 OID_AUTO, "arm_tlp", CTLFLAG_RD,
1269 "Total number of times the tlp timer armed");
1270 rack_calc_zero = counter_u64_alloc(M_WAITOK);
1271 rack_calc_nonzero = counter_u64_alloc(M_WAITOK);
1272 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1273 SYSCTL_CHILDREN(rack_counters),
1274 OID_AUTO, "calc_zero", CTLFLAG_RD,
1276 "Total number of times pacing time worked out to zero");
1277 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1278 SYSCTL_CHILDREN(rack_counters),
1279 OID_AUTO, "calc_nonzero", CTLFLAG_RD,
1281 "Total number of times pacing time worked out to non-zero");
1282 rack_paced_segments = counter_u64_alloc(M_WAITOK);
1283 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1284 SYSCTL_CHILDREN(rack_counters),
1285 OID_AUTO, "paced", CTLFLAG_RD,
1286 &rack_paced_segments,
1287 "Total number of times a segment send caused hptsi");
1288 rack_unpaced_segments = counter_u64_alloc(M_WAITOK);
1289 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1290 SYSCTL_CHILDREN(rack_counters),
1291 OID_AUTO, "unpaced", CTLFLAG_RD,
1292 &rack_unpaced_segments,
1293 "Total number of times a segment did not cause hptsi");
1294 rack_saw_enobuf = counter_u64_alloc(M_WAITOK);
1295 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1296 SYSCTL_CHILDREN(rack_counters),
1297 OID_AUTO, "saw_enobufs", CTLFLAG_RD,
1299 "Total number of times a segment did not cause hptsi");
1300 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK);
1301 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1302 SYSCTL_CHILDREN(rack_counters),
1303 OID_AUTO, "saw_enetunreach", CTLFLAG_RD,
1304 &rack_saw_enetunreach,
1305 "Total number of times a segment did not cause hptsi");
1306 rack_to_alloc = counter_u64_alloc(M_WAITOK);
1307 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1308 SYSCTL_CHILDREN(rack_counters),
1309 OID_AUTO, "allocs", CTLFLAG_RD,
1311 "Total allocations of tracking structures");
1312 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK);
1313 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1314 SYSCTL_CHILDREN(rack_counters),
1315 OID_AUTO, "allochard", CTLFLAG_RD,
1316 &rack_to_alloc_hard,
1317 "Total allocations done with sleeping the hard way");
1318 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK);
1319 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1320 SYSCTL_CHILDREN(rack_counters),
1321 OID_AUTO, "allocemerg", CTLFLAG_RD,
1322 &rack_to_alloc_emerg,
1323 "Total allocations done from emergency cache");
1324 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK);
1325 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1326 SYSCTL_CHILDREN(rack_counters),
1327 OID_AUTO, "alloc_limited", CTLFLAG_RD,
1328 &rack_to_alloc_limited,
1329 "Total allocations dropped due to limit");
1330 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK);
1331 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1332 SYSCTL_CHILDREN(rack_counters),
1333 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD,
1334 &rack_alloc_limited_conns,
1335 "Connections with allocations dropped due to limit");
1336 rack_split_limited = counter_u64_alloc(M_WAITOK);
1337 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1338 SYSCTL_CHILDREN(rack_counters),
1339 OID_AUTO, "split_limited", CTLFLAG_RD,
1340 &rack_split_limited,
1341 "Split allocations dropped due to limit");
1342 rack_sack_proc_all = counter_u64_alloc(M_WAITOK);
1343 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1344 SYSCTL_CHILDREN(rack_counters),
1345 OID_AUTO, "sack_long", CTLFLAG_RD,
1346 &rack_sack_proc_all,
1347 "Total times we had to walk whole list for sack processing");
1348 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK);
1349 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1350 SYSCTL_CHILDREN(rack_counters),
1351 OID_AUTO, "sack_restart", CTLFLAG_RD,
1352 &rack_sack_proc_restart,
1353 "Total times we had to walk whole list due to a restart");
1354 rack_sack_proc_short = counter_u64_alloc(M_WAITOK);
1355 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1356 SYSCTL_CHILDREN(rack_counters),
1357 OID_AUTO, "sack_short", CTLFLAG_RD,
1358 &rack_sack_proc_short,
1359 "Total times we took shortcut for sack processing");
1360 rack_enter_tlp_calc = counter_u64_alloc(M_WAITOK);
1361 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1362 SYSCTL_CHILDREN(rack_counters),
1363 OID_AUTO, "tlp_calc_entered", CTLFLAG_RD,
1364 &rack_enter_tlp_calc,
1365 "Total times we called calc-tlp");
1366 rack_used_tlpmethod = counter_u64_alloc(M_WAITOK);
1367 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1368 SYSCTL_CHILDREN(rack_counters),
1369 OID_AUTO, "hit_tlp_method", CTLFLAG_RD,
1370 &rack_used_tlpmethod,
1371 "Total number of runt sacks");
1372 rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK);
1373 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1374 SYSCTL_CHILDREN(rack_counters),
1375 OID_AUTO, "hit_tlp_method2", CTLFLAG_RD,
1376 &rack_used_tlpmethod2,
1377 "Total number of times we hit TLP method 2");
1378 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK);
1379 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1380 SYSCTL_CHILDREN(rack_attack),
1381 OID_AUTO, "skipacked", CTLFLAG_RD,
1382 &rack_sack_skipped_acked,
1383 "Total number of times we skipped previously sacked");
1384 rack_sack_splits = counter_u64_alloc(M_WAITOK);
1385 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1386 SYSCTL_CHILDREN(rack_attack),
1387 OID_AUTO, "ofsplit", CTLFLAG_RD,
1389 "Total number of times we did the old fashion tree split");
1390 rack_progress_drops = counter_u64_alloc(M_WAITOK);
1391 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1392 SYSCTL_CHILDREN(rack_counters),
1393 OID_AUTO, "prog_drops", CTLFLAG_RD,
1394 &rack_progress_drops,
1395 "Total number of progress drops");
1396 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK);
1397 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1398 SYSCTL_CHILDREN(rack_counters),
1399 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD,
1400 &rack_input_idle_reduces,
1401 "Total number of idle reductions on input");
1402 rack_collapsed_win = counter_u64_alloc(M_WAITOK);
1403 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1404 SYSCTL_CHILDREN(rack_counters),
1405 OID_AUTO, "collapsed_win", CTLFLAG_RD,
1406 &rack_collapsed_win,
1407 "Total number of collapsed windows");
1408 rack_tlp_does_nada = counter_u64_alloc(M_WAITOK);
1409 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1410 SYSCTL_CHILDREN(rack_counters),
1411 OID_AUTO, "tlp_nada", CTLFLAG_RD,
1412 &rack_tlp_does_nada,
1413 "Total number of nada tlp calls");
1414 rack_try_scwnd = counter_u64_alloc(M_WAITOK);
1415 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1416 SYSCTL_CHILDREN(rack_counters),
1417 OID_AUTO, "tried_scwnd", CTLFLAG_RD,
1418 &rack_try_scwnd,
1419 "Total number of scwnd attempts");
1421 rack_per_timer_hole = counter_u64_alloc(M_WAITOK);
1422 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1423 SYSCTL_CHILDREN(rack_counters),
1424 OID_AUTO, "timer_hole", CTLFLAG_RD,
1425 &rack_per_timer_hole,
1426 "Total persists start in timer hole");
1427 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK);
1428 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1429 OID_AUTO, "outsize", CTLFLAG_RD,
1430 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes");
1431 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK);
1432 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1433 OID_AUTO, "opts", CTLFLAG_RD,
1434 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats");
1435 SYSCTL_ADD_PROC(&rack_sysctl_ctx,
1436 SYSCTL_CHILDREN(rack_sysctl_root),
1437 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1438 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters");
1442 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a)
1444 if (SEQ_GEQ(b->r_start, a->r_start) &&
1445 SEQ_LT(b->r_start, a->r_end)) {
1447 * The entry b is within the block a, i.e.:
1449 * a -- |-------------|
1454 * b -- |-----------|
1457 } else if (SEQ_GEQ(b->r_start, a->r_end)) {
1459 * b falls at or after the end of a (i.e. as the next
1460 * sequence block after a), so a
1461 * is said to be smaller than b.
1471 * What's left is where a is
1472 * larger than b. i.e:
1476 * b -- |--------------|
1481 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);
1482 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);
1485 rc_init_window(struct tcp_rack *rack)
1489 if (rack->rc_init_win == 0) {
1491 * Nothing set by the user, use the system stack
1494 return(tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)));
1496 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win;
1501 rack_get_fixed_pacing_bw(struct tcp_rack *rack)
1503 if (IN_RECOVERY(rack->rc_tp->t_flags))
1504 return (rack->r_ctl.rc_fixed_pacing_rate_rec);
1505 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
1506 return (rack->r_ctl.rc_fixed_pacing_rate_ss);
1508 return (rack->r_ctl.rc_fixed_pacing_rate_ca);
1512 rack_get_bw(struct tcp_rack *rack)
1514 if (rack->use_fixed_rate) {
1515 /* Return the fixed pacing rate */
1516 return (rack_get_fixed_pacing_bw(rack));
1518 if (rack->r_ctl.gp_bw == 0) {
1520 * We have no b/w measurement yet;
1521 * if we have a user set initial bw
1522 * return it. If we don't have that and
1523 * we have an srtt, use the tcp IW (10) to
1524 * calculate a fictional b/w over the SRTT
1525 * which is more or less a guess. Note
1526 * we don't use our IW from rack on purpose
1527 * so if we have like IW=30, we are not
1528 * calculating a "huge" b/w.
1531 if (rack->r_ctl.init_rate)
1532 return (rack->r_ctl.init_rate);
1534 /* Has the user set a max peak rate? */
1535 #ifdef NETFLIX_PEAKRATE
1536 if (rack->rc_tp->t_maxpeakrate)
1537 return (rack->rc_tp->t_maxpeakrate);
1539 /* Ok, let's come up with the IW guess, if we have an srtt */
1540 if (rack->rc_tp->t_srtt == 0) {
1542 * Go with old pacing method
1543 * i.e. burst mitigation only.
1547 /* Ok, let's get the initial TCP win (not rack's) */
1548 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp));
1549 srtt = ((uint64_t)TICKS_2_USEC(rack->rc_tp->t_srtt) >> TCP_RTT_SHIFT);
1550 bw *= (uint64_t)USECS_IN_SECOND;
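/*
 * Illustration of the fictional IW-based guess (example numbers only,
 * not values taken from the code): with a 14,480 byte initial window
 * and a 50,000 usec SRTT, the guess works out to
 * (14480 * 1,000,000) / 50,000, i.e. roughly 289,600 bytes/sec,
 * which stands in until real goodput measurements exist.
 */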
1556 if (rack->r_ctl.num_avg >= RACK_REQ_AVG) {
1557 /* Averaging is done, we can return the value */
1558 bw = rack->r_ctl.gp_bw;
1560 /* Still doing initial average must calculate */
1561 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_avg;
1563 #ifdef NETFLIX_PEAKRATE
1564 if ((rack->rc_tp->t_maxpeakrate) &&
1565 (bw > rack->rc_tp->t_maxpeakrate)) {
1566 /* The user has set a peak rate to pace at;
1567 * don't allow us to pace faster than that.
1569 return (rack->rc_tp->t_maxpeakrate);
1577 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm)
1579 if (rack->use_fixed_rate) {
1581 } else if (rack->in_probe_rtt && (rsm == NULL))
1582 return(rack->r_ctl.rack_per_of_gp_probertt);
1583 else if ((IN_RECOVERY(rack->rc_tp->t_flags) &&
1584 rack->r_ctl.rack_per_of_gp_rec)) {
1586 /* a retransmission always uses the recovery rate */
1587 return(rack->r_ctl.rack_per_of_gp_rec);
1588 } else if (rack->rack_rec_nonrxt_use_cr) {
1589 /* Directed to use the configured rate */
1590 goto configured_rate;
1591 } else if (rack->rack_no_prr &&
1592 (rack->r_ctl.rack_per_of_gp_rec > 100)) {
1593 /* No PRR, lets just use the b/w estimate only */
1597 * Here we may have a non-retransmit but we
1598 * have no overrides, so just use the recovery
1599 * rate (prr is in effect).
1601 return(rack->r_ctl.rack_per_of_gp_rec);
1605 /* For the configured rate we look at our cwnd vs the ssthresh */
1606 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
1607 return (rack->r_ctl.rack_per_of_gp_ss);
1609 return(rack->r_ctl.rack_per_of_gp_ca);
1613 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm)
1616 * We allow rack_per_of_gp_xx to dictate our bw rate we want.
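* For example (illustrative numbers only): a gain of 150 applied to a
* b/w estimate of 1,000,000 bytes/sec yields bw_est = 1,500,000
* bytes/sec after the multiply and the divide by 100 below.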
1621 gain = (uint64_t)rack_get_output_gain(rack, rsm);
1623 bw_est /= (uint64_t)100;
1624 /* Never fall below the minimum (def 64kbps) */
1625 if (bw_est < RACK_MIN_BW)
1626 bw_est = RACK_MIN_BW;
1631 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod)
1633 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1634 union tcp_log_stackspecific log;
1637 if ((mod != 1) && (rack_verbose_logging == 0)) {
1639 * We get 3 values currently for mod
1640 * 1 - We are retransmitting and this tells the reason.
1641 * 2 - We are clearing a dup-ack count.
1642 * 3 - We are incrementing a dup-ack count.
1644 * The clear/increment are only logged
1645 * if you have BBverbose on.
1649 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1650 log.u_bbr.flex1 = tsused;
1651 log.u_bbr.flex2 = thresh;
1652 log.u_bbr.flex3 = rsm->r_flags;
1653 log.u_bbr.flex4 = rsm->r_dupack;
1654 log.u_bbr.flex5 = rsm->r_start;
1655 log.u_bbr.flex6 = rsm->r_end;
1656 log.u_bbr.flex8 = mod;
1657 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1658 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1659 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1660 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1661 TCP_LOG_EVENTP(rack->rc_tp, NULL,
1662 &rack->rc_inp->inp_socket->so_rcv,
1663 &rack->rc_inp->inp_socket->so_snd,
1664 BBR_LOG_SETTINGS_CHG, 0,
1665 0, &log, false, &tv);
1670 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
1672 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1673 union tcp_log_stackspecific log;
1676 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1677 log.u_bbr.flex1 = TICKS_2_MSEC(rack->rc_tp->t_srtt >> TCP_RTT_SHIFT);
1678 log.u_bbr.flex2 = to * 1000;
1679 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
1680 log.u_bbr.flex4 = slot;
1681 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot;
1682 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
1683 log.u_bbr.flex7 = rack->rc_in_persist;
1684 log.u_bbr.flex8 = which;
1685 if (rack->rack_no_prr)
1686 log.u_bbr.pkts_out = 0;
1688 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
1689 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1690 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1691 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1692 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1693 TCP_LOG_EVENTP(rack->rc_tp, NULL,
1694 &rack->rc_inp->inp_socket->so_rcv,
1695 &rack->rc_inp->inp_socket->so_snd,
1696 BBR_LOG_TIMERSTAR, 0,
1697 0, &log, false, &tv);
1702 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm)
1704 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1705 union tcp_log_stackspecific log;
1708 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1709 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1710 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1711 log.u_bbr.flex8 = to_num;
1712 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt;
1713 log.u_bbr.flex2 = rack->rc_rack_rtt;
1715 log.u_bbr.flex3 = 0;
1717 log.u_bbr.flex3 = rsm->r_end - rsm->r_start;
1718 if (rack->rack_no_prr)
1719 log.u_bbr.flex5 = 0;
1721 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
1722 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1723 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1724 TCP_LOG_EVENTP(rack->rc_tp, NULL,
1725 &rack->rc_inp->inp_socket->so_rcv,
1726 &rack->rc_inp->inp_socket->so_snd,
1728 0, &log, false, &tv);
1733 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len,
1734 struct rack_sendmap *rsm, int conf)
1736 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
1737 union tcp_log_stackspecific log;
1739 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1740 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1741 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1742 log.u_bbr.flex1 = t;
1743 log.u_bbr.flex2 = len;
1744 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt * HPTS_USEC_IN_MSEC;
1745 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest * HPTS_USEC_IN_MSEC;
1746 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest * HPTS_USEC_IN_MSEC;
1747 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_rtt_cnt;
1748 log.u_bbr.flex7 = conf;
1749 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot * (uint64_t)HPTS_USEC_IN_MSEC;
1750 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method;
1751 if (rack->rack_no_prr)
1752 log.u_bbr.pkts_out = 0;
1754 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
1755 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1756 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtt;
1757 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags;
1758 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1760 log.u_bbr.pkt_epoch = rsm->r_start;
1761 log.u_bbr.lost = rsm->r_end;
1762 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt;
1765 log.u_bbr.pkt_epoch = rack->rc_tp->iss;
1767 log.u_bbr.cwnd_gain = 0;
1769 /* Write out general bits of interest rrs here */
1770 log.u_bbr.use_lt_bw = rack->rc_highly_buffered;
1771 log.u_bbr.use_lt_bw <<= 1;
1772 log.u_bbr.use_lt_bw |= rack->forced_ack;
1773 log.u_bbr.use_lt_bw <<= 1;
1774 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul;
1775 log.u_bbr.use_lt_bw <<= 1;
1776 log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
1777 log.u_bbr.use_lt_bw <<= 1;
1778 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
1779 log.u_bbr.use_lt_bw <<= 1;
1780 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
1781 log.u_bbr.use_lt_bw <<= 1;
1782 log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
1783 log.u_bbr.use_lt_bw <<= 1;
1784 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom;
1785 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight;
1786 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts;
1787 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered;
1788 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts;
1789 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt;
1790 TCP_LOG_EVENTP(tp, NULL,
1791 &rack->rc_inp->inp_socket->so_rcv,
1792 &rack->rc_inp->inp_socket->so_snd,
1794 0, &log, false, &tv);
1799 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt)
1802 * Log the rtt sample we are
1803 * applying to the srtt algorithm in
1804 * useconds.
1806 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1807 union tcp_log_stackspecific log;
1810 /* Convert our ms to microseconds */
1811 memset(&log, 0, sizeof(log));
1812 log.u_bbr.flex1 = rtt * 1000;
1813 log.u_bbr.flex2 = rack->r_ctl.ack_count;
1814 log.u_bbr.flex3 = rack->r_ctl.sack_count;
1815 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
1816 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra;
1817 log.u_bbr.flex8 = rack->sack_attack_disable;
1818 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1819 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1820 TCP_LOG_EVENTP(rack->rc_tp, NULL,
1821 &rack->rc_inp->inp_socket->so_rcv,
1822 &rack->rc_inp->inp_socket->so_snd,
1824 0, &log, false, &tv);
1829 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line)
1831 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
1832 union tcp_log_stackspecific log;
1835 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1836 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1837 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1838 log.u_bbr.flex1 = line;
1839 log.u_bbr.flex2 = tick;
1840 log.u_bbr.flex3 = tp->t_maxunacktime;
1841 log.u_bbr.flex4 = tp->t_acktime;
1842 log.u_bbr.flex8 = event;
1843 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1844 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1845 TCP_LOG_EVENTP(tp, NULL,
1846 &rack->rc_inp->inp_socket->so_rcv,
1847 &rack->rc_inp->inp_socket->so_snd,
1848 BBR_LOG_PROGRESS, 0,
1849 0, &log, false, &tv);
1854 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv)
1856 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1857 union tcp_log_stackspecific log;
1859 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1860 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1861 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1862 log.u_bbr.flex1 = slot;
1863 if (rack->rack_no_prr)
1864 log.u_bbr.flex2 = 0;
1866 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt;
1867 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags);
1868 log.u_bbr.flex8 = rack->rc_in_persist;
1869 log.u_bbr.timeStamp = cts;
1870 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1871 TCP_LOG_EVENTP(rack->rc_tp, NULL,
1872 &rack->rc_inp->inp_socket->so_rcv,
1873 &rack->rc_inp->inp_socket->so_snd,
1875 0, &log, false, tv);
1880 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out)
1882 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1883 union tcp_log_stackspecific log;
1886 memset(&log, 0, sizeof(log));
1887 log.u_bbr.flex1 = did_out;
1888 log.u_bbr.flex2 = nxt_pkt;
1889 log.u_bbr.flex3 = way_out;
1890 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
1891 if (rack->rack_no_prr)
1892 log.u_bbr.flex5 = 0;
1894 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
1895 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs;
1896 log.u_bbr.flex7 = rack->r_wanted_output;
1897 log.u_bbr.flex8 = rack->rc_in_persist;
1898 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1899 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1900 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1901 TCP_LOG_EVENTP(rack->rc_tp, NULL,
1902 &rack->rc_inp->inp_socket->so_rcv,
1903 &rack->rc_inp->inp_socket->so_snd,
1904 BBR_LOG_DOSEG_DONE, 0,
1905 0, &log, false, &tv);
1910 rack_log_type_hrdwtso(struct tcpcb *tp, struct tcp_rack *rack, int len, int mod, int32_t orig_len, int frm)
1912 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
1913 union tcp_log_stackspecific log;
1917 memset(&log, 0, sizeof(log));
1918 cts = tcp_get_usecs(&tv);
1919 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs;
1920 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
1921 log.u_bbr.flex4 = len;
1922 log.u_bbr.flex5 = orig_len;
1923 log.u_bbr.flex6 = rack->r_ctl.rc_sacked;
1924 log.u_bbr.flex7 = mod;
1925 log.u_bbr.flex8 = frm;
1926 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1927 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1928 TCP_LOG_EVENTP(tp, NULL,
1929 &tp->t_inpcb->inp_socket->so_rcv,
1930 &tp->t_inpcb->inp_socket->so_snd,
1931 TCP_HDWR_PACE_SIZE, 0,
1932 0, &log, false, &tv);
1937 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot,
1938 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use)
1940 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1941 union tcp_log_stackspecific log;
1944 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1945 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1946 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1947 log.u_bbr.flex1 = slot;
1948 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
1949 log.u_bbr.flex4 = reason;
1950 if (rack->rack_no_prr)
1951 log.u_bbr.flex5 = 0;
1953 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
1954 log.u_bbr.flex7 = hpts_calling;
1955 log.u_bbr.flex8 = rack->rc_in_persist;
1956 log.u_bbr.lt_epoch = cwnd_to_use;
1957 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1958 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1959 TCP_LOG_EVENTP(rack->rc_tp, NULL,
1960 &rack->rc_inp->inp_socket->so_rcv,
1961 &rack->rc_inp->inp_socket->so_snd,
1963 tlen, &log, false, &tv);
1968 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts,
1969 struct timeval *tv, uint32_t flags_on_entry)
1971 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1972 union tcp_log_stackspecific log;
1974 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1975 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1976 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1977 log.u_bbr.flex1 = line;
1978 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to;
1979 log.u_bbr.flex3 = flags_on_entry;
1980 log.u_bbr.flex4 = us_cts;
1981 if (rack->rack_no_prr)
1982 log.u_bbr.flex5 = 0;
1984 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
1985 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
1986 log.u_bbr.flex7 = hpts_removed;
1987 log.u_bbr.flex8 = 1;
1988 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags;
1989 log.u_bbr.timeStamp = us_cts;
1990 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1991 TCP_LOG_EVENTP(rack->rc_tp, NULL,
1992 &rack->rc_inp->inp_socket->so_rcv,
1993 &rack->rc_inp->inp_socket->so_snd,
1994 BBR_LOG_TIMERCANC, 0,
1995 0, &log, false, tv);
2000 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
2001 uint32_t flex1, uint32_t flex2,
2002 uint32_t flex3, uint32_t flex4,
2003 uint32_t flex5, uint32_t flex6,
2004 uint16_t flex7, uint8_t mod)
2006 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2007 union tcp_log_stackspecific log;
2011 /* No, you can't use 1; it's for the real to-cancel */
2014 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2015 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2016 log.u_bbr.flex1 = flex1;
2017 log.u_bbr.flex2 = flex2;
2018 log.u_bbr.flex3 = flex3;
2019 log.u_bbr.flex4 = flex4;
2020 log.u_bbr.flex5 = flex5;
2021 log.u_bbr.flex6 = flex6;
2022 log.u_bbr.flex7 = flex7;
2023 log.u_bbr.flex8 = mod;
2024 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2025 &rack->rc_inp->inp_socket->so_rcv,
2026 &rack->rc_inp->inp_socket->so_snd,
2027 BBR_LOG_TIMERCANC, 0,
2028 0, &log, false, &tv);
2033 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers)
2035 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2036 union tcp_log_stackspecific log;
2039 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2040 log.u_bbr.flex1 = timers;
2041 log.u_bbr.flex2 = ret;
2042 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp;
2043 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
2044 log.u_bbr.flex5 = cts;
2045 if (rack->rack_no_prr)
2046 log.u_bbr.flex6 = 0;
2048 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt;
2049 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2050 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2051 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2052 &rack->rc_inp->inp_socket->so_rcv,
2053 &rack->rc_inp->inp_socket->so_snd,
2054 BBR_LOG_TO_PROCESS, 0,
2055 0, &log, false, &tv);
2060 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd)
2062 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2063 union tcp_log_stackspecific log;
2066 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2067 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out;
2068 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs;
2069 if (rack->rack_no_prr)
2070 log.u_bbr.flex3 = 0;
2072 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt;
2073 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered;
2074 log.u_bbr.flex5 = rack->r_ctl.rc_sacked;
2075 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt;
2076 log.u_bbr.flex8 = frm;
2077 log.u_bbr.pkts_out = orig_cwnd;
2078 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2079 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2080 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2081 &rack->rc_inp->inp_socket->so_rcv,
2082 &rack->rc_inp->inp_socket->so_snd,
2084 0, &log, false, &tv);
2088 #ifdef NETFLIX_EXP_DETECTION
2090 rack_log_sad(struct tcp_rack *rack, int event)
2092 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2093 union tcp_log_stackspecific log;
2096 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2097 log.u_bbr.flex1 = rack->r_ctl.sack_count;
2098 log.u_bbr.flex2 = rack->r_ctl.ack_count;
2099 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra;
2100 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
2101 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced;
2102 log.u_bbr.flex6 = tcp_sack_to_ack_thresh;
2103 log.u_bbr.pkts_out = tcp_sack_to_move_thresh;
2104 log.u_bbr.lt_epoch = (tcp_force_detection << 8);
2105 log.u_bbr.lt_epoch |= rack->do_detection;
2106 log.u_bbr.applimited = tcp_map_minimum;
2107 log.u_bbr.flex7 = rack->sack_attack_disable;
2108 log.u_bbr.flex8 = event;
2109 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2110 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2111 log.u_bbr.delivered = tcp_sad_decay_val;
2112 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2113 &rack->rc_inp->inp_socket->so_rcv,
2114 &rack->rc_inp->inp_socket->so_snd,
2115 TCP_SAD_DETECTION, 0,
2116 0, &log, false, &tv);
2122 rack_counter_destroy(void)
2124 counter_u64_free(rack_ack_total);
2125 counter_u64_free(rack_express_sack);
2126 counter_u64_free(rack_sack_total);
2127 counter_u64_free(rack_move_none);
2128 counter_u64_free(rack_move_some);
2129 counter_u64_free(rack_sack_attacks_detected);
2130 counter_u64_free(rack_sack_attacks_reversed);
2131 counter_u64_free(rack_sack_used_next_merge);
2132 counter_u64_free(rack_sack_used_prev_merge);
2133 counter_u64_free(rack_badfr);
2134 counter_u64_free(rack_badfr_bytes);
2135 counter_u64_free(rack_rtm_prr_retran);
2136 counter_u64_free(rack_rtm_prr_newdata);
2137 counter_u64_free(rack_timestamp_mismatch);
2138 counter_u64_free(rack_find_high);
2139 counter_u64_free(rack_reorder_seen);
2140 counter_u64_free(rack_tlp_tot);
2141 counter_u64_free(rack_tlp_newdata);
2142 counter_u64_free(rack_tlp_retran);
2143 counter_u64_free(rack_tlp_retran_bytes);
2144 counter_u64_free(rack_tlp_retran_fail);
2145 counter_u64_free(rack_to_tot);
2146 counter_u64_free(rack_to_arm_rack);
2147 counter_u64_free(rack_to_arm_tlp);
2148 counter_u64_free(rack_calc_zero);
2149 counter_u64_free(rack_calc_nonzero);
2150 counter_u64_free(rack_paced_segments);
2151 counter_u64_free(rack_unpaced_segments);
2152 counter_u64_free(rack_saw_enobuf);
2153 counter_u64_free(rack_saw_enetunreach);
2154 counter_u64_free(rack_to_alloc);
2155 counter_u64_free(rack_to_alloc_hard);
2156 counter_u64_free(rack_to_alloc_emerg);
2157 counter_u64_free(rack_to_alloc_limited);
2158 counter_u64_free(rack_alloc_limited_conns);
2159 counter_u64_free(rack_split_limited);
2160 counter_u64_free(rack_sack_proc_all);
2161 counter_u64_free(rack_sack_proc_restart);
2162 counter_u64_free(rack_sack_proc_short);
2163 counter_u64_free(rack_enter_tlp_calc);
2164 counter_u64_free(rack_used_tlpmethod);
2165 counter_u64_free(rack_used_tlpmethod2);
2166 counter_u64_free(rack_sack_skipped_acked);
2167 counter_u64_free(rack_sack_splits);
2168 counter_u64_free(rack_progress_drops);
2169 counter_u64_free(rack_input_idle_reduces);
2170 counter_u64_free(rack_collapsed_win);
2171 counter_u64_free(rack_tlp_does_nada);
2172 counter_u64_free(rack_try_scwnd);
2173 counter_u64_free(rack_per_timer_hole);
2174 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE);
2175 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE);
2178 static struct rack_sendmap *
2179 rack_alloc(struct tcp_rack *rack)
2181 struct rack_sendmap *rsm;
2183 rsm = uma_zalloc(rack_zone, M_NOWAIT);
2185 rack->r_ctl.rc_num_maps_alloced++;
2186 counter_u64_add(rack_to_alloc, 1);
2189 if (rack->rc_free_cnt) {
2190 counter_u64_add(rack_to_alloc_emerg, 1);
2191 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
2192 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
2193 rack->rc_free_cnt--;
2199 static struct rack_sendmap *
2200 rack_alloc_full_limit(struct tcp_rack *rack)
2202 if ((V_tcp_map_entries_limit > 0) &&
2203 (rack->do_detection == 0) &&
2204 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
2205 counter_u64_add(rack_to_alloc_limited, 1);
2206 if (!rack->alloc_limit_reported) {
2207 rack->alloc_limit_reported = 1;
2208 counter_u64_add(rack_alloc_limited_conns, 1);
2212 return (rack_alloc(rack));
2215 /* wrapper to allocate a sendmap entry, subject to a specific limit */
2216 static struct rack_sendmap *
2217 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type)
2219 struct rack_sendmap *rsm;
2222 /* currently there is only one limit type */
2223 if (V_tcp_map_split_limit > 0 &&
2224 (rack->do_detection == 0) &&
2225 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) {
2226 counter_u64_add(rack_split_limited, 1);
2227 if (!rack->alloc_limit_reported) {
2228 rack->alloc_limit_reported = 1;
2229 counter_u64_add(rack_alloc_limited_conns, 1);
2235 /* allocate and mark in the limit type, if set */
2236 rsm = rack_alloc(rack);
2237 if (rsm != NULL && limit_type) {
2238 rsm->r_limit_type = limit_type;
2239 rack->r_ctl.rc_num_split_allocs++;
2245 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
2247 if (rsm->r_flags & RACK_APP_LIMITED) {
2248 if (rack->r_ctl.rc_app_limited_cnt > 0) {
2249 rack->r_ctl.rc_app_limited_cnt--;
2252 if (rsm->r_limit_type) {
2253 /* currently there is only one limit type */
2254 rack->r_ctl.rc_num_split_allocs--;
2256 if (rsm == rack->r_ctl.rc_first_appl) {
2257 if (rack->r_ctl.rc_app_limited_cnt == 0)
2258 rack->r_ctl.rc_first_appl = NULL;
2260 /* Follow the next one out */
2261 struct rack_sendmap fe;
2263 fe.r_start = rsm->r_nseq_appl;
2264 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
2267 if (rsm == rack->r_ctl.rc_resend)
2268 rack->r_ctl.rc_resend = NULL;
2269 if (rsm == rack->r_ctl.rc_rsm_at_retran)
2270 rack->r_ctl.rc_rsm_at_retran = NULL;
2271 if (rsm == rack->r_ctl.rc_end_appl)
2272 rack->r_ctl.rc_end_appl = NULL;
2273 if (rack->r_ctl.rc_tlpsend == rsm)
2274 rack->r_ctl.rc_tlpsend = NULL;
2275 if (rack->r_ctl.rc_sacklast == rsm)
2276 rack->r_ctl.rc_sacklast = NULL;
2277 if (rack->rc_free_cnt < rack_free_cache) {
2278 memset(rsm, 0, sizeof(struct rack_sendmap));
2279 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext);
2280 rsm->r_limit_type = 0;
2281 rack->rc_free_cnt++;
2284 rack->r_ctl.rc_num_maps_alloced--;
2285 uma_zfree(rack_zone, rsm);
2289 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack)
2291 uint64_t srtt, bw, len, tim;
2292 uint32_t segsiz, def_len, minl;
2294 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
2295 def_len = rack_def_data_window * segsiz;
2296 if (rack->rc_gp_filled == 0) {
2298 * We have no measurement (IW is in flight?) so
2299 * we can only guess using our data_window sysctl
2300 * value (usually 100MSS).
2305 * Now we have a number of factors to consider.
2307 * 1) We have a desired BDP which is usually
2309 * 2) We have a minimum number of rtt's usually 1 SRTT
2310 * but we allow it to be more.
2311 * 3) We want to make sure a measurement lasts N useconds (if
2312 * we have set rack_min_measure_usec).
2314 * We handle the first concern here by trying to create a data
2315 * window of max(rack_def_data_window, DesiredBDP). The
2316 * second concern we handle in not letting the measurement
2317 * window end normally until at least the required SRTT's
2318 * have gone by which is done further below in
2319 * rack_enough_for_measurement(). Finally the third concern
2320 * we also handle here by calculating how long that time
2321 * would take at the current BW and then return the
2322 * max of our first calculation and that length. Note
2323 * that if rack_min_measure_usec is 0, we don't deal
2324 * with concern 3. Also for both Concern 1 and 3 an
2325 * application limited period could end the measurement
2328 * So let's calculate the BDP with the "known" b/w using
2329 * the SRTT as our rtt and then multiply it by the
2330 * goal.
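* For example (illustrative numbers only): at bw = 12,500,000
* bytes/sec (100 Mbps) and srtt = 40,000 usecs, the BDP is about
* 500,000 bytes, so with rack_goal_bdp = 2 the measurement window
* becomes about 1,000,000 bytes, rounded up to a segment boundary.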
2332 bw = rack_get_bw(rack);
2333 srtt = ((uint64_t)TICKS_2_USEC(tp->t_srtt) >> TCP_RTT_SHIFT);
2335 len /= (uint64_t)HPTS_USEC_IN_SEC;
2336 len *= max(1, rack_goal_bdp);
2337 /* Now we need to round up to the nearest MSS */
2338 len = roundup(len, segsiz);
2339 if (rack_min_measure_usec) {
2340 /* Now calculate our min length for this b/w */
2341 tim = rack_min_measure_usec;
2342 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC;
2345 minl = roundup(minl, segsiz);
2350 * Now if we have a very small window we want
2351 * to attempt to get the window that is
2352 * as small as possible. This happens on
2353 * low b/w connections and we don't want to
2354 * span huge numbers of rtt's between measurements.
2356 * We basically include 2 over our "MIN window" so
2357 * that the measurement can be shortened (possibly) by
2361 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz)));
2363 return (max((uint32_t)len, def_len));
2368 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack)
2370 uint32_t tim, srtts, segsiz;
2373 * Has enough time passed for the GP measurement to be valid?
2375 if ((tp->snd_max == tp->snd_una) ||
2376 (th_ack == tp->snd_max)){
2380 if (SEQ_LT(th_ack, tp->gput_seq)) {
2381 /* Not enough bytes yet */
2384 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
2385 if (SEQ_LT(th_ack, tp->gput_ack) &&
2386 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
2387 /* Not enough bytes yet */
2390 if (rack->r_ctl.rc_first_appl &&
2391 (rack->r_ctl.rc_first_appl->r_start == th_ack)) {
2393 * We are up to the app limited point,
2394 * so we have to measure irrespective of the time.
2398 /* Now what about time? */
2399 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts);
2400 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts;
2404 /* Nope not even a full SRTT has passed */
2409 rack_log_timely(struct tcp_rack *rack,
2410 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd,
2411 uint64_t up_bnd, int line, uint8_t method)
2413 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2414 union tcp_log_stackspecific log;
2417 memset(&log, 0, sizeof(log));
2418 log.u_bbr.flex1 = logged;
2419 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt;
2420 log.u_bbr.flex2 <<= 4;
2421 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt;
2422 log.u_bbr.flex2 <<= 4;
2423 log.u_bbr.flex2 |= rack->rc_gp_incr;
2424 log.u_bbr.flex2 <<= 4;
2425 log.u_bbr.flex2 |= rack->rc_gp_bwred;
2426 log.u_bbr.flex3 = rack->rc_gp_incr;
2427 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
2428 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca;
2429 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec;
2430 log.u_bbr.flex7 = rack->rc_gp_bwred;
2431 log.u_bbr.flex8 = method;
2432 log.u_bbr.cur_del_rate = cur_bw;
2433 log.u_bbr.delRate = low_bnd;
2434 log.u_bbr.bw_inuse = up_bnd;
2435 log.u_bbr.rttProp = rack_get_bw(rack);
2436 log.u_bbr.pkt_epoch = line;
2437 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
2438 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2439 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2440 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
2441 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
2442 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom;
2443 log.u_bbr.cwnd_gain <<= 1;
2444 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec;
2445 log.u_bbr.cwnd_gain <<= 1;
2446 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
2447 log.u_bbr.cwnd_gain <<= 1;
2448 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
2449 log.u_bbr.lost = rack->r_ctl.rc_loss_count;
2450 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2451 &rack->rc_inp->inp_socket->so_rcv,
2452 &rack->rc_inp->inp_socket->so_snd,
2454 0, &log, false, &tv);
2459 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult)
2462 * Before we increase we need to know if
2463 * the estimate just made was less than
2464 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est)
2466 * If we already are pacing at a fast enough
2467 * rate to push us faster, there is no sense of
2468 * increasing.
2470 * We first calculate our actual pacing rate (ss or ca multiplier
2471 * times our cur_bw).
2473 * Then we take the last measured rate and multiply it by our
2474 * maximum pacing overage to give us a max allowable rate.
2476 * If our act_rate is smaller than our max_allowable rate
2477 * then we should increase. Else we should hold steady.
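* For example (illustrative numbers only, treating mult as a
* percentage): with mult = 150, cur_bw = 1,000,000 bytes/sec,
* last_bw_est = 1,400,000 bytes/sec and rack_max_per_above = 10,
* act_rate = 1,500,000 and max_allow_rate = 1,540,000, so the raise
* is allowed; were last_bw_est only 1,300,000, max_allow_rate would
* be 1,430,000 and we would hold steady.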
2480 uint64_t act_rate, max_allow_rate;
2482 if (rack_timely_no_stopping)
2485 if ((cur_bw == 0) || (last_bw_est == 0)) {
2487 * Initial startup case or
2488 * everything is acked case.
2490 rack_log_timely(rack, mult, cur_bw, 0, 0,
2496 * We can always pace at or slightly above our rate.
2498 rack_log_timely(rack, mult, cur_bw, 0, 0,
2502 act_rate = cur_bw * (uint64_t)mult;
2504 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100);
2505 max_allow_rate /= 100;
2506 if (act_rate < max_allow_rate) {
2508 * Here the rate we are actually pacing at
2509 * is smaller than 10% above our last measurement.
2510 * This means we are pacing below what we would
2511 * like to try to achieve (plus some wiggle room).
2513 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
2518 * Here we are already pacing at least rack_max_per_above (10%)
2519 * above what we are getting back. This most likely indicates
2520 * that we are being limited (cwnd/rwnd/app) and can't
2521 * get any more b/w. There is no sense of trying to
2522 * raise the pacing rate; it's not speeding us up
2523 * and we already are pacing faster than we are getting.
2525 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
2532 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack)
2535 * When we drag bottom, we want to assure
2536 * that no multiplier is below 1.0, if so
2537 * we want to restore it to at least that.
2539 if (rack->r_ctl.rack_per_of_gp_rec < 100) {
2540 /* This is unlikely; we usually do not touch recovery */
2541 rack->r_ctl.rack_per_of_gp_rec = 100;
2543 if (rack->r_ctl.rack_per_of_gp_ca < 100) {
2544 rack->r_ctl.rack_per_of_gp_ca = 100;
2546 if (rack->r_ctl.rack_per_of_gp_ss < 100) {
2547 rack->r_ctl.rack_per_of_gp_ss = 100;
2552 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack)
2554 if (rack->r_ctl.rack_per_of_gp_ca > 100) {
2555 rack->r_ctl.rack_per_of_gp_ca = 100;
2557 if (rack->r_ctl.rack_per_of_gp_ss > 100) {
2558 rack->r_ctl.rack_per_of_gp_ss = 100;
2563 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override)
2565 int32_t calc, logged, plus;
2571 * override is passed when we are
2572 * losing b/w and making one last
2573 * gasp at trying to not lose out
2574 * to a new-reno flow.
2578 /* In classic timely we boost by 5x if we have 5 increases in a row; let's not */
2579 if (rack->rc_gp_incr &&
2580 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) {
2582 * Reset and get 5 strokes more before the boost. Note
2583 * that the count is 0 based so we have to add one.
2586 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST;
2587 rack->rc_gp_timely_inc_cnt = 0;
2589 plus = (uint32_t)rack_gp_increase_per;
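/*
 * For example (illustrative numbers only): with rack_gp_increase_per
 * of 2 and RACK_TIMELY_CNT_BOOST of 5, most rounds add 2 to the
 * multiplier, while the round that resets the streak above adds
 * 2 * 5 = 10.
 */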
2590 /* Must be at least 1% increase for true timely increases */
2592 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0)))
2594 if (rack->rc_gp_saw_rec &&
2595 (rack->rc_gp_no_rec_chg == 0) &&
2596 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
2597 rack->r_ctl.rack_per_of_gp_rec)) {
2598 /* We have been in recovery ding it too */
2599 calc = rack->r_ctl.rack_per_of_gp_rec + plus;
2603 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc;
2604 if (rack_per_upper_bound_ss &&
2605 (rack->rc_dragged_bottom == 0) &&
2606 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss))
2607 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss;
2609 if (rack->rc_gp_saw_ca &&
2610 (rack->rc_gp_saw_ss == 0) &&
2611 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
2612 rack->r_ctl.rack_per_of_gp_ca)) {
2614 calc = rack->r_ctl.rack_per_of_gp_ca + plus;
2618 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc;
2619 if (rack_per_upper_bound_ca &&
2620 (rack->rc_dragged_bottom == 0) &&
2621 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca))
2622 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca;
2624 if (rack->rc_gp_saw_ss &&
2625 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
2626 rack->r_ctl.rack_per_of_gp_ss)) {
2628 calc = rack->r_ctl.rack_per_of_gp_ss + plus;
2631 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc;
2632 if (rack_per_upper_bound_ss &&
2633 (rack->rc_dragged_bottom == 0) &&
2634 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss))
2635 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss;
2639 (rack->rc_gp_incr == 0)){
2640 /* Go into increment mode */
2641 rack->rc_gp_incr = 1;
2642 rack->rc_gp_timely_inc_cnt = 0;
2644 if (rack->rc_gp_incr &&
2646 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) {
2647 rack->rc_gp_timely_inc_cnt++;
2649 rack_log_timely(rack, logged, plus, 0, 0,
2654 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff)
2657 * norm_grad = rtt_diff / minrtt;
2658 * new_per = curper * (1 - B * norm_grad)
2660 * B = rack_gp_decrease_per (default 10%)
2661 * rtt_diff = input var current rtt-diff
2662 * curper = input var current percentage
2663 * minrtt = from rack filter
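* For example (illustrative numbers only): with curper = 200, B = 10,
* rtt_diff = 5,000 usecs and minrtt = 25,000 usecs, norm_grad = 0.2
* and new_per = 200 * (1 - 0.10 * 0.2) = 196.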
2668 perf = (((uint64_t)curper * ((uint64_t)1000000 -
2669 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 *
2670 (((uint64_t)rtt_diff * (uint64_t)1000000)/
2671 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/
2672 (uint64_t)1000000)) /
2674 if (perf > curper) {
2678 return ((uint32_t)perf);
2682 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt)
2686 * result = curper * (1 - B * (1 - highrttthresh / rtt))
2689 * B = rack_gp_decrease_per (default 10%)
2690 * highrttthresh = filter_min * rack_gp_rtt_maxmul
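* For example (illustrative numbers only): with curper = 200, B = 10,
* highrttthresh = 40,000 usecs and rtt = 80,000 usecs, the result is
* 200 * (1 - 0.10 * (1 - 0.5)) = 190.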
2693 uint32_t highrttthresh;
2695 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
2697 perf = (((uint64_t)curper * ((uint64_t)1000000 -
2698 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 -
2699 ((uint64_t)highrttthresh * (uint64_t)1000000) /
2700 (uint64_t)rtt)) / 100)) /(uint64_t)1000000);
2705 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff)
2707 uint64_t logvar, logvar2, logvar3;
2708 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val;
2710 if (rack->rc_gp_incr) {
2711 /* Turn off increment counting */
2712 rack->rc_gp_incr = 0;
2713 rack->rc_gp_timely_inc_cnt = 0;
2715 ss_red = ca_red = rec_red = 0;
2717 /* Calculate the reduction value */
2721 /* Must be at least 1% reduction */
2722 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) {
2723 /* We have been in recovery ding it too */
2724 if (timely_says == 2) {
2725 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt);
2726 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
2732 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
2733 if (rack->r_ctl.rack_per_of_gp_rec > val) {
2734 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val);
2735 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val;
2737 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
2740 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec)
2741 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
2744 if (rack->rc_gp_saw_ss) {
2746 if (timely_says == 2) {
2747 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt);
2748 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
2754 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
2755 if (rack->r_ctl.rack_per_of_gp_ss > new_per) {
2756 ss_red = rack->r_ctl.rack_per_of_gp_ss - val;
2757 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val;
2760 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
2764 logvar2 = (uint32_t)rtt;
2766 logvar2 |= (uint32_t)rtt_diff;
2767 logvar3 = rack_gp_rtt_maxmul;
2769 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
2770 rack_log_timely(rack, timely_says,
2772 logvar, __LINE__, 10);
2774 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss)
2775 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
2777 } else if (rack->rc_gp_saw_ca) {
2779 if (timely_says == 2) {
2780 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt);
2781 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
2787 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
2788 if (rack->r_ctl.rack_per_of_gp_ca > val) {
2789 ca_red = rack->r_ctl.rack_per_of_gp_ca - val;
2790 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val;
2792 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
2797 logvar2 = (uint32_t)rtt;
2799 logvar2 |= (uint32_t)rtt_diff;
2800 logvar3 = rack_gp_rtt_maxmul;
2802 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
2803 rack_log_timely(rack, timely_says,
2805 logvar, __LINE__, 10);
2807 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca)
2808 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
2811 if (rack->rc_gp_timely_dec_cnt < 0x7) {
2812 rack->rc_gp_timely_dec_cnt++;
2813 if (rack_timely_dec_clear &&
2814 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear))
2815 rack->rc_gp_timely_dec_cnt = 0;
2820 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar,
2825 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts,
2826 uint32_t rtt, uint32_t line, uint8_t reas)
2828 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2829 union tcp_log_stackspecific log;
2832 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2833 log.u_bbr.flex1 = line;
2834 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts;
2835 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts;
2836 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
2837 log.u_bbr.flex5 = rtt;
2838 log.u_bbr.flex6 = rack->rc_highly_buffered;
2839 log.u_bbr.flex6 <<= 1;
2840 log.u_bbr.flex6 |= rack->forced_ack;
2841 log.u_bbr.flex6 <<= 1;
2842 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul;
2843 log.u_bbr.flex6 <<= 1;
2844 log.u_bbr.flex6 |= rack->in_probe_rtt;
2845 log.u_bbr.flex6 <<= 1;
2846 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt;
2847 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt;
2848 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca;
2849 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec;
2850 log.u_bbr.flex8 = reas;
2851 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2852 log.u_bbr.delRate = rack_get_bw(rack);
2853 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt;
2854 log.u_bbr.cur_del_rate <<= 32;
2855 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt;
2856 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered;
2857 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
2858 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2859 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
2860 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
2861 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts;
2862 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight;
2863 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
2864 log.u_bbr.rttProp = us_cts;
2865 log.u_bbr.rttProp <<= 32;
2866 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt;
2867 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2868 &rack->rc_inp->inp_socket->so_rcv,
2869 &rack->rc_inp->inp_socket->so_snd,
2870 BBR_LOG_RTT_SHRINKS, 0,
2871 0, &log, false, &rack->r_ctl.act_rcv_time);
2876 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt)
2880 bwdp = rack_get_bw(rack);
2881 bwdp *= (uint64_t)rtt;
2882 bwdp /= (uint64_t)HPTS_USEC_IN_SEC;
2883 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz);
2884 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) {
2886 * A window protocol must be able to have 4 packets
2887 * outstanding as the floor in order to function
2888 * (especially considering delayed ack :D).
2890 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs);
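/*
 * For example (illustrative numbers only): with bw = 2,500,000
 * bytes/sec, rtt = 40,000 usecs and segsiz = 1448, bwdp = 100,000
 * bytes and the target flight rounds up to 70 segments (101,360
 * bytes); a path whose BDP is under rack_timely_min_segs segments
 * gets the floor instead.
 */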
2895 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts)
2898 * ProbeRTT is a bit different in rack_pacing than in
2899 * BBR. It is like BBR in that it uses the lowering of
2900 * the RTT as a signal that we saw something new and
2901 * counts from there for how long between. But it is
2902 * different in that it's quite simple. It does not
2903 * play with the cwnd and wait until we get down
2904 * to N segments outstanding and hold that for
2905 * 200ms. Instead it just sets the pacing reduction
2906 * rate to a set percentage (70 by default) and holds
2907 * that for a number of recent GP Srtt's.
2911 if (rack->rc_gp_dyn_mul == 0)
2914 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) {
2918 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
2919 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
2921 * Stop the goodput now, the idea here is
2922 * that future measurements with in_probe_rtt
2923 * won't register if they are not greater so
2924 * we want to get what info (if any) is available
2927 rack_do_goodput_measurement(rack->rc_tp, rack,
2928 rack->rc_tp->snd_una, __LINE__);
2930 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
2931 rack->r_ctl.rc_time_probertt_entered = us_cts;
2932 segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
2933 rack->r_ctl.rc_pace_min_segs);
2934 rack->in_probe_rtt = 1;
2935 rack->measure_saw_probe_rtt = 1;
2936 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
2937 rack->r_ctl.rc_time_probertt_starts = 0;
2938 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt;
2939 if (rack_probertt_use_min_rtt_entry)
2940 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
2942 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt);
2943 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
2944 __LINE__, RACK_RTTS_ENTERPROBE);
2948 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts)
2950 struct rack_sendmap *rsm;
2953 segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
2954 rack->r_ctl.rc_pace_min_segs);
2955 rack->in_probe_rtt = 0;
2956 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
2957 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
2959 * Stop the goodput now, the idea here is
2960 * that future measurements with in_probe_rtt
2961 * won't register if they are not greater so
2962 * we want to get what info (if any) is available
2965 rack_do_goodput_measurement(rack->rc_tp, rack,
2966 rack->rc_tp->snd_una, __LINE__);
2967 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
2969 * We don't have enough data to make a measurement.
2970 * So let's just stop and start here after exiting
2971 * probe-rtt. We probably are not interested in
2972 * the results anyway.
2974 rack->rc_tp->t_flags &= ~TF_GPUTINPROG;
2977 * Measurements through the current snd_max are going
2978 * to be limited by the slower pacing rate.
2980 * We need to mark these as app-limited so we
2981 * don't collapse the b/w.
2983 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
2984 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
2985 if (rack->r_ctl.rc_app_limited_cnt == 0)
2986 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
2989 * Go out to the end app limited and mark
2990 * this new one as next and move the end_appl up
2993 if (rack->r_ctl.rc_end_appl)
2994 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
2995 rack->r_ctl.rc_end_appl = rsm;
2997 rsm->r_flags |= RACK_APP_LIMITED;
2998 rack->r_ctl.rc_app_limited_cnt++;
3001 * Now, we need to examine our pacing rate multipliers.
3002 * If it's under 100%, we need to kick it back up to
3003 * 100%. We also don't let it be over our "max" above
3004 * the actual rate i.e. 100% + rack_clamp_atexit_prtt.
3005 * Note setting clamp_atexit_prtt to 0 has the effect
3006 * of setting CA/SS to 100% always at exit (which is
3007 * the default behavior).
3009 if (rack_probertt_clear_is) {
3010 rack->rc_gp_incr = 0;
3011 rack->rc_gp_bwred = 0;
3012 rack->rc_gp_timely_inc_cnt = 0;
3013 rack->rc_gp_timely_dec_cnt = 0;
3015 /* Do we do any clamping at exit? */
3016 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) {
3017 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp;
3018 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp;
3020 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) {
3021 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt;
3022 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt;
3025 * Let's set rtt_diff to 0, so that we will get a "boost"
3028 rack->r_ctl.rc_rtt_diff = 0;
3030 /* Clear all flags so we start fresh */
3031 rack->rc_tp->t_bytes_acked = 0;
3032 rack->rc_tp->ccv->flags &= ~CCF_ABC_SENTAWND;
3034 * If configured to, set the cwnd and ssthresh to
3037 if (rack_probe_rtt_sets_cwnd) {
3041 /* Set ssthresh so we get into CA once we hit our target */
3042 if (rack_probertt_use_min_rtt_exit == 1) {
3043 /* Set to min rtt */
3044 rack_set_prtt_target(rack, segsiz,
3045 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
3046 } else if (rack_probertt_use_min_rtt_exit == 2) {
3047 /* Set to current gp rtt */
3048 rack_set_prtt_target(rack, segsiz,
3049 rack->r_ctl.rc_gp_srtt);
3050 } else if (rack_probertt_use_min_rtt_exit == 3) {
3051 /* Set to entry gp rtt */
3052 rack_set_prtt_target(rack, segsiz,
3053 rack->r_ctl.rc_entry_gp_rtt);
3058 sum = rack->r_ctl.rc_entry_gp_rtt;
3060 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt));
3063 * A highly buffered path needs
3064 * cwnd space for timely to work.
3065 * Let's set things up as if
3066 * we are heading back here again.
3068 setval = rack->r_ctl.rc_entry_gp_rtt;
3069 } else if (sum >= 15) {
3071 * Let's take the smaller of the
3072 * two since we are just somewhat
3075 setval = rack->r_ctl.rc_gp_srtt;
3076 if (setval > rack->r_ctl.rc_entry_gp_rtt)
3077 setval = rack->r_ctl.rc_entry_gp_rtt;
3080 * Here we are not highly buffered
3081 * and should pick the min we can to
3082 * keep from causing loss.
3084 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3086 rack_set_prtt_target(rack, segsiz,
3089 if (rack_probe_rtt_sets_cwnd > 1) {
3090 /* There is a percentage here to boost */
3091 ebdp = rack->r_ctl.rc_target_probertt_flight;
3092 ebdp *= rack_probe_rtt_sets_cwnd;
3094 setto = rack->r_ctl.rc_target_probertt_flight + ebdp;
3096 setto = rack->r_ctl.rc_target_probertt_flight;
3097 rack->rc_tp->snd_cwnd = roundup(setto, segsiz);
3098 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) {
3100 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs;
3102 /* If we set in the cwnd also set the ssthresh point so we are in CA */
3103 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1);
3105 rack_log_rtt_shrinks(rack, us_cts,
3106 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3107 __LINE__, RACK_RTTS_EXITPROBE);
3108 /* Clear times last so log has all the info */
3109 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max;
3110 rack->r_ctl.rc_time_probertt_entered = us_cts;
3111 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
3112 rack->r_ctl.rc_time_of_last_probertt = us_cts;
3116 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts)
3118 /* Check in on probe-rtt */
3119 if (rack->rc_gp_filled == 0) {
3120 /* We do not do p-rtt unless we have gp measurements */
3123 if (rack->in_probe_rtt) {
3124 uint64_t no_overflow;
3125 uint32_t endtime, must_stay;
3127 if (rack->r_ctl.rc_went_idle_time &&
3128 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) {
3130 * We went idle during prtt, just exit now.
3132 rack_exit_probertt(rack, us_cts);
3133 } else if (rack_probe_rtt_safety_val &&
3134 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) &&
3135 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) {
3137 * Probe RTT safety value triggered!
3139 rack_log_rtt_shrinks(rack, us_cts,
3140 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3141 __LINE__, RACK_RTTS_SAFETY);
3142 rack_exit_probertt(rack, us_cts);
3144 /* Calculate the max we will wait */
3145 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait);
3146 if (rack->rc_highly_buffered)
3147 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp);
3148 /* Calculate the min we must wait */
3149 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain);
3150 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) &&
3151 TSTMP_LT(us_cts, endtime)) {
3153 /* Do we lower more? */
3155 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered))
3156 calc = us_cts - rack->r_ctl.rc_time_probertt_entered;
3159 calc /= max(rack->r_ctl.rc_gp_srtt, 1);
3162 calc *= rack_per_of_gp_probertt_reduce;
3163 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc;
3165 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh)
3166 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh;
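/*
 * For example (illustrative numbers only): entering at a 70% pacing
 * percentage with rack_per_of_gp_probertt_reduce = 10, being two
 * full gp_srtt's into probe-rtt lowers the percentage to
 * 70 - (2 * 10) = 50, but never below rack_per_of_gp_lowthresh.
 */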
3168 /* We must reach target or the time set */
3171 if (rack->r_ctl.rc_time_probertt_starts == 0) {
3172 if ((TSTMP_LT(us_cts, must_stay) &&
3173 rack->rc_highly_buffered) ||
3174 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) >
3175 rack->r_ctl.rc_target_probertt_flight)) {
3176 /* We are not past the must_stay time */
3179 rack_log_rtt_shrinks(rack, us_cts,
3180 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3181 __LINE__, RACK_RTTS_REACHTARGET);
3182 rack->r_ctl.rc_time_probertt_starts = us_cts;
3183 if (rack->r_ctl.rc_time_probertt_starts == 0)
3184 rack->r_ctl.rc_time_probertt_starts = 1;
3185 /* Restore back to our rate we want to pace at in prtt */
3186 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
3189 * Set up our end time: some number of gp_srtts plus 200ms.
3191 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt *
3192 (uint64_t)rack_probertt_gpsrtt_cnt_mul);
3193 if (rack_probertt_gpsrtt_cnt_div)
3194 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div);
3197 endtime += rack_min_probertt_hold;
3198 endtime += rack->r_ctl.rc_time_probertt_starts;
3199 if (TSTMP_GEQ(us_cts, endtime)) {
3200 /* yes, exit probertt */
3201 rack_exit_probertt(rack, us_cts);
3204 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) {
3205 /* Go into probertt, it's been too long since we went lower */
3206 rack_enter_probertt(rack, us_cts);
3211 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est,
3212 uint32_t rtt, int32_t rtt_diff)
3214 uint64_t cur_bw, up_bnd, low_bnd, subfr;
3217 if ((rack->rc_gp_dyn_mul == 0) ||
3218 (rack->use_fixed_rate) ||
3219 (rack->in_probe_rtt) ||
3220 (rack->rc_always_pace == 0)) {
3221 /* No dynamic GP multiplier in play */
3224 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start;
3225 cur_bw = rack_get_bw(rack);
3226 /* Calculate our up and down range */
3227 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up;
3229 up_bnd += rack->r_ctl.last_gp_comp_bw;
3231 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down;
3233 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr;
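/*
 * For example (illustrative numbers only): with a last composite b/w
 * of 1,000,000 bytes/sec, rack_gp_per_bw_mul_up = 2 (2%) and
 * rack_gp_per_bw_mul_down = 4 (4%), the band is roughly 960,000 to
 * 1,020,000 bytes/sec; estimates above it keep raising the
 * multipliers, estimates below it mean we are losing ground, and
 * inside it timely decides.
 */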
3234 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) {
3236 * This is the case where our RTT is above
3237 * the max target and we have been configured
3238 * to just do timely no bonus up stuff in that case.
3240 * There are two configurations, set to 1, and we
3241 * just do timely if we are over our max. If it's
3242 * set above 1 then we slam the multipliers down
3243 * to 100 and then decrement per timely.
3245 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
3247 if (rack->r_ctl.rc_no_push_at_mrtt > 1)
3248 rack_validate_multipliers_at_or_below_100(rack);
3249 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
3250 } else if ((last_bw_est < low_bnd) && !losses) {
3252 * We are decreasing; this is a bit complicated. It
3253 * means we are losing ground. This could be
3254 * because another flow entered and we are competing
3255 * for b/w with it. This will push the RTT up which
3256 * makes timely unusable unless we want to get shoved
3257 * into a corner and just be backed off (the age
3258 * old problem with delay based CC).
3260 * On the other hand if it was a route change we
3261 * would like to stay somewhat contained and not
3262 * blow out the buffers.
3264 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
3266 rack->r_ctl.last_gp_comp_bw = cur_bw;
3267 if (rack->rc_gp_bwred == 0) {
3268 /* Go into reduction counting */
3269 rack->rc_gp_bwred = 1;
3270 rack->rc_gp_timely_dec_cnt = 0;
3272 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) ||
3273 (timely_says == 0)) {
3275 * Push another time with faster pacing
3276 * to try to gain back (we include the override to
3277 * get a full raise factor).
3279 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) ||
3280 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) ||
3281 (timely_says == 0) ||
3282 (rack_down_raise_thresh == 0)) {
3284 * Do an override up in b/w if we were
3285 * below the threshold; if the threshold
3286 * is zero we always do the raise.
3288 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1);
3290 /* Log it stays the same */
3291 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0,
3294 rack->rc_gp_timely_dec_cnt++;
3295 /* We are not incrementing, so really a no-count */
3296 rack->rc_gp_incr = 0;
3297 rack->rc_gp_timely_inc_cnt = 0;
3300 * Let's just use the RTT
3301 * information and give up pushing.
3306 } else if ((timely_says != 2) &&
3308 (last_bw_est > up_bnd)) {
3310 * We are increasing b/w, let's keep going, updating
3311 * our b/w and ignoring any timely input, unless
3312 * of course we are at our max raise (if there is one).
3315 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
3317 rack->r_ctl.last_gp_comp_bw = cur_bw;
3318 if (rack->rc_gp_saw_ss &&
3319 rack_per_upper_bound_ss &&
3320 (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) {
3322 * In cases where we can't go higher
3323 * we should just use timely.
3327 if (rack->rc_gp_saw_ca &&
3328 rack_per_upper_bound_ca &&
3329 (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) {
3331 * In cases where we can't go higher
3332 * we should just use timely.
3336 rack->rc_gp_bwred = 0;
3337 rack->rc_gp_timely_dec_cnt = 0;
3338 /* You get a set number of pushes if timely is trying to reduce */
3339 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) {
3340 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
3342 /* Log it stays the same */
3343 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0,
3349 * We are staying between the lower and upper range bounds
3350 * so use timely to decide.
3352 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
3356 rack->rc_gp_incr = 0;
3357 rack->rc_gp_timely_inc_cnt = 0;
3358 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) &&
3360 (last_bw_est < low_bnd)) {
3361 /* We are losing ground */
3362 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
3363 rack->rc_gp_timely_dec_cnt++;
3364 /* We are not incrementing, so really a no-count */
3365 rack->rc_gp_incr = 0;
3366 rack->rc_gp_timely_inc_cnt = 0;
3368 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
3370 rack->rc_gp_bwred = 0;
3371 rack->rc_gp_timely_dec_cnt = 0;
3372 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
3378 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt)
3380 int32_t timely_says;
3381 uint64_t log_mult, log_rtt_a_diff;
3383 log_rtt_a_diff = rtt;
3384 log_rtt_a_diff <<= 32;
3385 log_rtt_a_diff |= (uint32_t)rtt_diff;
3386 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) *
3387 rack_gp_rtt_maxmul)) {
3388 /* Reduce the b/w multiplier */
3390 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
3392 log_mult |= prev_rtt;
3393 rack_log_timely(rack, timely_says, log_mult,
3394 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3395 log_rtt_a_diff, __LINE__, 4);
3396 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
3397 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
3398 max(rack_gp_rtt_mindiv , 1)))) {
3399 /* Increase the b/w multiplier */
3400 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
3401 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
3402 max(rack_gp_rtt_mindiv , 1));
3404 log_mult |= prev_rtt;
3406 rack_log_timely(rack, timely_says, log_mult ,
3407 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3408 log_rtt_a_diff, __LINE__, 5);
3411 * Use a gradient to decide; the timely gradient is:
3413 * grad = rc_rtt_diff / min_rtt;
3415 * anything below or equal to 0 will be
3416 * an increase indication. Anything above
3417 * zero is a decrease. Note we take care
3418 * of the actual gradient calculation
3419 * in the reduction (it's not needed for the increase).
3422 log_mult = prev_rtt;
3423 if (rtt_diff <= 0) {
3425 * Rttdiff is less than or equal to zero, increase the
3426 * b/w multiplier (it's 0 or negative)
3429 rack_log_timely(rack, timely_says, log_mult,
3430 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6);
3432 /* Reduce the b/w multiplier */
3434 rack_log_timely(rack, timely_says, log_mult,
3435 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7);
3438 return (timely_says);
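/*
 * Worked example of the judgement above (numbers are hypothetical):
 * with a filtered min rtt of 10ms, rack_gp_rtt_maxmul of 3 and a
 * minmul/mindiv pair of 1/4, any rtt of 30ms or more yields the
 * "reduce" verdict, anything at or below 12ms yields "increase", and
 * rtts in between defer to the sign of rc_rtt_diff (at or below zero
 * means increase, above zero means decrease).
 */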
3442 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
3443 tcp_seq th_ack, int line)
3445 uint64_t tim, bytes_ps, ltim, stim, utim;
3446 uint32_t segsiz, bytes, reqbytes, us_cts;
3447 int32_t gput, new_rtt_diff, timely_says;
3449 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
3450 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
3451 if (TSTMP_GEQ(us_cts, tp->gput_ts))
3452 tim = us_cts - tp->gput_ts;
3456 if (TSTMP_GT(rack->r_ctl.rc_gp_cumack_ts, rack->r_ctl.rc_gp_output_ts))
3457 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts;
3461 * Use the larger of the send time or ack time. This prevents us
3462 * from being influenced by ack artifacts to come up with too
3463 * high a measurement. Note that since we are spanning over many more
3464 * bytes in most of our measurements hopefully that is less likely to occur.
3470 utim = max(stim, 1);
3471 /* Lets validate utim */
3472 ltim = max(1, (utim/HPTS_USEC_IN_MSEC));
3473 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim;
3474 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz));
3475 if ((tim == 0) && (stim == 0)) {
3477 * Invalid measurement time, maybe
3478 * all on one ack/one send?
3482 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
3483 0, 0, 0, 10, __LINE__, NULL);
3484 goto skip_measurement;
3486 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) {
3487 /* We never made a us_rtt measurement? */
3490 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
3491 0, 0, 0, 10, __LINE__, NULL);
3492 goto skip_measurement;
3495 * Calculate the maximum possible b/w this connection
3496 * could have. We base our calculation on the lowest
3497 * rtt we have seen during the measurement and the
3498 * largest rwnd the client has given us in that time. This
3499 * forms a BDP that is the maximum that we could ever
3500 * get to the client. Anything larger is not valid.
3502 * I originally had code here that rejected measurements
3503 * where the time was less than 1/2 the latest us_rtt.
3504 * But after thinking on that I realized it's wrong: say
3505 * you had a 150Mbps or even 1Gbps link, and you
3506 * were a long way away.. for example I am in Europe (100ms rtt)
3507 * talking to my 1Gbps link in S.C. Now measuring say 150,000
3508 * bytes my time would be 1.2ms, and yet the rtt check would say
3509 * the measurement was invalid since the time was < 50ms. The
3510 * same thing is true for 150Mb (8ms of time).
3512 * A better way I realized is to look at what the maximum
3513 * the connection could possibly do. This is gated on
3514 * the lowest RTT we have seen and the highest rwnd.
3515 * We should in theory never exceed that; if we do,
3516 * then something on the path is storing up packets
3517 * and then feeding them all at once to our endpoint,
3518 * messing up our measurement.
3520 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd;
3521 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC;
3522 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt;
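/*
 * Example with made-up numbers: a peak advertised rwnd of 1,000,000
 * bytes and a lowest measured rtt of 10,000us give
 * last_max_bw = 1,000,000 * 1,000,000 / 10,000 = 100,000,000
 * bytes/sec (~800Mbps).  Any goodput sample above that is assumed to
 * be an ack-compression artifact and is capped further below.
 */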
3523 if (SEQ_LT(th_ack, tp->gput_seq)) {
3524 /* No measurement can be made */
3527 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
3528 0, 0, 0, 10, __LINE__, NULL);
3529 goto skip_measurement;
3531 bytes = (th_ack - tp->gput_seq);
3532 bytes_ps = (uint64_t)bytes;
3534 * Don't measure a b/w for pacing unless we have gotten at least
3535 * an initial window's worth of data in this measurement interval.
3537 * Small numbers of bytes get badly influenced by delayed ack and
3538 * other artifacts. Note we take the initial window or our
3539 * defined minimum GP (defaulting to 10 which hopefully is the IW).
3542 if (rack->rc_gp_filled == 0) {
3544 * The initial estimate is special. We
3545 * have blasted out an IW worth of packets
3546 * without any real valid ack ts results. We
3547 * then set up the app_limited_needs_set flag;
3548 * this should get the first ack in (probably 2
3549 * MSS worth) to be recorded as the timestamp.
3550 * We thus allow a smaller number of bytes, i.e. the IW minus 2 MSS.
3553 reqbytes -= (2 * segsiz);
3554 /* Also let's fill previous so our first measurement is neutral */
3555 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
3557 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) {
3558 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
3559 rack->r_ctl.rc_app_limited_cnt,
3560 0, 0, 10, __LINE__, NULL);
3561 goto skip_measurement;
3564 * We now need to calculate the Timely like status so
3565 * we can update (possibly) the b/w multipliers.
3567 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt;
3568 if (rack->rc_gp_filled == 0) {
3569 /* No previous reading */
3570 rack->r_ctl.rc_rtt_diff = new_rtt_diff;
3572 if (rack->measure_saw_probe_rtt == 0) {
3574 * We don't want a probertt to be counted
3575 * since it will incorrectly come out negative. We
3576 * expect to be reducing the RTT when we
3577 * pace at a slower rate.
3579 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8);
3580 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8);
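/* I.e. rc_rtt_diff is kept as a 7/8 weighted moving average of the srtt deltas. */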
3583 timely_says = rack_make_timely_judgement(rack,
3584 rack->r_ctl.rc_gp_srtt,
3585 rack->r_ctl.rc_rtt_diff,
3586 rack->r_ctl.rc_prev_gp_srtt
3588 bytes_ps *= HPTS_USEC_IN_SEC;
3590 if (bytes_ps > rack->r_ctl.last_max_bw) {
3592 * Something on the path is playing games,
3593 * since this b/w is not possible based
3594 * on our BDP (highest rwnd and lowest rtt
3595 * we saw in the measurement window).
3597 * Another option here would be to
3598 * instead skip the measurement.
3600 rack_log_pacing_delay_calc(rack, bytes, reqbytes,
3601 bytes_ps, rack->r_ctl.last_max_bw, 0,
3602 11, __LINE__, NULL);
3603 bytes_ps = rack->r_ctl.last_max_bw;
3605 /* We store gp for b/w in bytes per second */
3606 if (rack->rc_gp_filled == 0) {
3607 /* Initial measurement */
3609 rack->r_ctl.gp_bw = bytes_ps;
3610 rack->rc_gp_filled = 1;
3611 rack->r_ctl.num_avg = 1;
3612 rack_set_pace_segments(rack->rc_tp, rack, __LINE__);
3614 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
3615 rack->r_ctl.rc_app_limited_cnt,
3616 0, 0, 10, __LINE__, NULL);
3618 if (rack->rc_inp->inp_in_hpts &&
3619 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
3621 * Ok we can't trust the pacer in this case
3622 * where we transition from un-paced to paced.
3623 * Or for that matter when the burst mitigation
3624 * was making a wild guess and got it wrong.
3625 * Stop the pacer and clear up all the aggregate delays etc.
3628 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
3629 rack->r_ctl.rc_hpts_flags = 0;
3630 rack->r_ctl.rc_last_output_to = 0;
3632 } else if (rack->r_ctl.num_avg < RACK_REQ_AVG) {
3633 /* Still a small number, run an average */
3634 rack->r_ctl.gp_bw += bytes_ps;
3635 rack->r_ctl.num_avg++;
3636 if (rack->r_ctl.num_avg >= RACK_REQ_AVG) {
3637 /* We have collected enough to move forward */
3638 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_avg;
3642 * We want to take 1/wma of the goodput and add in to 7/8th
3643 * of the old value weighted by the srtt. So if your measurement
3644 * period is say 2 SRTT's long you would get 1/4 as the
3645 * value, if it was like 1/2 SRTT then you would get 1/16th.
3647 * But we must be careful not to take too much i.e. if the
3648 * srtt is say 20ms and the measurement is taken over
3649 * 400ms our weight would be 400/20 i.e. 20. On the
3650 * other hand if we get a measurement over 1ms with a
3651 * 10ms rtt we only want to take a much smaller portion.
3653 uint64_t resid_bw, subpart, addpart, srtt;
3655 srtt = ((uint64_t)TICKS_2_USEC(tp->t_srtt) >> TCP_RTT_SHIFT);
3658 * Strange why did t_srtt go back to zero?
3660 if (rack->r_ctl.rc_rack_min_rtt)
3661 srtt = (rack->r_ctl.rc_rack_min_rtt * HPTS_USEC_IN_MSEC);
3663 srtt = HPTS_USEC_IN_MSEC;
3666 * XXXrrs: Note for reviewers, in playing with
3667 * dynamic pacing I discovered this GP calculation
3668 * as done originally leads to some undesired results.
3669 * Basically you can get longer measurements contributing
3670 * too much to the WMA. Thus I changed it so that if you are doing
3671 * dynamic adjustments we only do the apportioned adjustment
3672 * when we have a very small (time wise) measurement. Longer
3673 * measurements just get their weight (defaulting to 1/8)
3674 * added to the WMA. We may want to think about changing
3675 * this to always do that for both sides i.e. dynamic
3676 * and non-dynamic... but considering lots of folks
3677 * were playing with this I did not want to change the
3678 * calculation per se without your thoughts.. Lawrence?
3681 if (rack->rc_gp_dyn_mul == 0) {
3682 subpart = rack->r_ctl.gp_bw * utim;
3683 subpart /= (srtt * 8);
3684 if (subpart < (rack->r_ctl.gp_bw / 2)) {
3686 * The b/w update takes no more
3687 * away than 1/2 our running total.
3690 addpart = bytes_ps * utim;
3691 addpart /= (srtt * 8);
3694 * Don't allow a single measurement
3695 * to account for more than 1/2 of the
3696 * WMA. This could happen on a retransmission
3697 * where utim becomes huge compared to
3698 * srtt (multiple retransmissions when using
3699 * the sending rate which factors in all the
3700 * transmissions from the first one).
3702 subpart = rack->r_ctl.gp_bw / 2;
3703 addpart = bytes_ps / 2;
3705 resid_bw = rack->r_ctl.gp_bw - subpart;
3706 rack->r_ctl.gp_bw = resid_bw + addpart;
3708 if ((utim / srtt) <= 1) {
3710 * The b/w update was over a small period
3711 * of time. The idea here is to prevent a small
3712 * measurement time period from counting
3713 * too much. So we scale it based on the
3714 * time so it attributes less than 1/rack_wma_divisor
3715 * of its measurement.
3717 subpart = rack->r_ctl.gp_bw * utim;
3718 subpart /= (srtt * rack_wma_divisor);
3719 addpart = bytes_ps * utim;
3720 addpart /= (srtt * rack_wma_divisor);
3723 * The scaled measurement was long
3724 * enough so let's just add in the
3725 * portion of the measurement i.e. 1/rack_wma_divisor
3727 subpart = rack->r_ctl.gp_bw / rack_wma_divisor;
3728 addpart = bytes_ps / rack_wma_divisor;
3730 if ((rack->measure_saw_probe_rtt == 0) ||
3731 (bytes_ps > rack->r_ctl.gp_bw)) {
3733 * For probe-rtt we only add it in
3734 * if it's larger; all others we just add in.
3737 resid_bw = rack->r_ctl.gp_bw - subpart;
3738 rack->r_ctl.gp_bw = resid_bw + addpart;
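/*
 * Putting numbers on the WMA above (assuming rack_wma_divisor is 8;
 * the real value is a sysctl): a sample spanning no more than one
 * srtt contributes utim/(srtt * 8) of itself, so a half-srtt sample
 * only displaces 1/16th of gp_bw, while any longer sample is simply
 * folded in as 7/8 old + 1/8 new.
 */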
3742 /* We do not update any multipliers if we are in or have seen a probe-rtt */
3743 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set)
3744 rack_update_multiplier(rack, timely_says, bytes_ps,
3745 rack->r_ctl.rc_gp_srtt,
3746 rack->r_ctl.rc_rtt_diff);
3747 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim,
3748 rack_get_bw(rack), 3, line, NULL);
3749 /* reset the gp srtt and setup the new prev */
3750 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
3751 /* Record the lost count for the next measurement */
3752 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count;
3754 * We restart our diffs based on the gpsrtt in the
3755 * measurement window.
3757 rack->rc_gp_rtt_set = 0;
3758 rack->rc_gp_saw_rec = 0;
3759 rack->rc_gp_saw_ca = 0;
3760 rack->rc_gp_saw_ss = 0;
3761 rack->rc_dragged_bottom = 0;
3765 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
3768 * XXXLAS: This is a temporary hack, and should be
3769 * chained off VOI_TCP_GPUT when stats(9) grows an
3770 * API to deal with chained VOIs.
3772 if (tp->t_stats_gput_prev > 0)
3773 stats_voi_update_abs_s32(tp->t_stats,
3775 ((gput - tp->t_stats_gput_prev) * 100) /
3776 tp->t_stats_gput_prev);
3778 tp->t_flags &= ~TF_GPUTINPROG;
3779 tp->t_stats_gput_prev = gput;
3781 * Now, are we app limited and is there space from where we
3782 * were to where we want to go?
3784 * We don't do the other case, i.e. non-applimited, here since
3785 * the next send will trigger us picking up the missing data.
3787 if (rack->r_ctl.rc_first_appl &&
3788 TCPS_HAVEESTABLISHED(tp->t_state) &&
3789 rack->r_ctl.rc_app_limited_cnt &&
3790 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) &&
3791 ((rack->r_ctl.rc_first_appl->r_start - th_ack) >
3792 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
3794 * Yep there is enough outstanding to make a measurement here.
3796 struct rack_sendmap *rsm, fe;
3798 tp->t_flags |= TF_GPUTINPROG;
3799 rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
3800 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
3801 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
3802 rack->app_limited_needs_set = 0;
3803 tp->gput_seq = th_ack;
3804 if (rack->in_probe_rtt)
3805 rack->measure_saw_probe_rtt = 1;
3806 else if ((rack->measure_saw_probe_rtt) &&
3807 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
3808 rack->measure_saw_probe_rtt = 0;
3809 if ((rack->r_ctl.rc_first_appl->r_start - th_ack) >= rack_get_measure_window(tp, rack)) {
3810 /* There is a full window to gain info from */
3811 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
3813 /* We can only measure up to the applimited point */
3814 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_start - th_ack);
3817 * Now we need to find the timestamp of the send at tp->gput_seq
3818 * for the send based measurement.
3820 fe.r_start = tp->gput_seq;
3821 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
3823 /* Ok send-based limit is set */
3824 if (SEQ_LT(rsm->r_start, tp->gput_seq)) {
3826 * Move back to include the earlier part
3827 * so our ack time lines up right (this may
3828 * make an overlapping measurement but that's ok).
3831 tp->gput_seq = rsm->r_start;
3833 if (rsm->r_flags & RACK_ACKED)
3834 tp->gput_ts = rsm->r_ack_arrival;
3836 rack->app_limited_needs_set = 1;
3837 rack->r_ctl.rc_gp_output_ts = rsm->usec_orig_send;
3840 * If we don't find the rsm due to some
3841 * send-limit, set the current time, which
3842 * basically disables the send-limit.
3844 rack->r_ctl.rc_gp_output_ts = tcp_get_usecs(NULL);
3846 rack_log_pacing_delay_calc(rack,
3851 rack->r_ctl.rc_app_limited_cnt,
3858 * CC wrapper hook functions
3861 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, struct tcphdr *th, uint16_t nsegs,
3862 uint16_t type, int32_t recovery)
3864 INP_WLOCK_ASSERT(tp->t_inpcb);
3865 tp->ccv->nsegs = nsegs;
3866 tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
3867 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) {
3870 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp);
3871 if (tp->ccv->bytes_this_ack > max) {
3872 tp->ccv->bytes_this_ack = max;
3875 if (rack->r_ctl.cwnd_to_use <= tp->snd_wnd)
3876 tp->ccv->flags |= CCF_CWND_LIMITED;
3878 tp->ccv->flags &= ~CCF_CWND_LIMITED;
3880 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
3881 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd);
3883 if ((tp->t_flags & TF_GPUTINPROG) &&
3884 rack_enough_for_measurement(tp, rack, th->th_ack)) {
3885 /* Measure the Goodput */
3886 rack_do_goodput_measurement(tp, rack, th->th_ack, __LINE__);
3887 #ifdef NETFLIX_PEAKRATE
3888 if ((type == CC_ACK) &&
3889 (tp->t_maxpeakrate)) {
3891 * We update t_peakrate_thr. This gives us roughly
3892 * one update per round trip time. Note
3893 * it will only be used if pace_always is off i.e
3894 * we don't do this for paced flows.
3896 tcp_update_peakrate_thr(tp);
3900 if (rack->r_ctl.cwnd_to_use > tp->snd_ssthresh) {
3901 tp->t_bytes_acked += tp->ccv->bytes_this_ack;
3902 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) {
3903 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use;
3904 tp->ccv->flags |= CCF_ABC_SENTAWND;
3907 tp->ccv->flags &= ~CCF_ABC_SENTAWND;
3908 tp->t_bytes_acked = 0;
3910 if (CC_ALGO(tp)->ack_received != NULL) {
3911 /* XXXLAS: Find a way to live without this */
3912 tp->ccv->curack = th->th_ack;
3913 CC_ALGO(tp)->ack_received(tp->ccv, type);
3916 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use);
3918 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) {
3919 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use;
3921 #ifdef NETFLIX_PEAKRATE
3922 /* we enforce max peak rate if it is set and we are not pacing */
3923 if ((rack->rc_always_pace == 0) &&
3924 tp->t_peakrate_thr &&
3925 (tp->snd_cwnd > tp->t_peakrate_thr)) {
3926 tp->snd_cwnd = tp->t_peakrate_thr;
3932 tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th)
3934 struct tcp_rack *rack;
3936 rack = (struct tcp_rack *)tp->t_fb_ptr;
3937 INP_WLOCK_ASSERT(tp->t_inpcb);
3939 * If we are doing PRR and have enough
3940 * room to send <or> we are pacing and prr
3941 * is disabled, we will want to see if we
3942 * can send data (by setting r_wanted_output to true).
3945 if ((rack->r_ctl.rc_prr_sndcnt > 0) ||
3947 rack->r_wanted_output = 1;
3951 rack_post_recovery(struct tcpcb *tp, struct tcphdr *th)
3953 struct tcp_rack *rack;
3956 orig_cwnd = tp->snd_cwnd;
3957 INP_WLOCK_ASSERT(tp->t_inpcb);
3958 rack = (struct tcp_rack *)tp->t_fb_ptr;
3959 if (rack->rc_not_backing_off == 0) {
3960 /* only alert CC if we alerted when we entered */
3961 if (CC_ALGO(tp)->post_recovery != NULL) {
3962 tp->ccv->curack = th->th_ack;
3963 CC_ALGO(tp)->post_recovery(tp->ccv);
3965 if (tp->snd_cwnd > tp->snd_ssthresh) {
3966 /* Drop us down to the ssthresh (1/2 cwnd at loss) */
3967 tp->snd_cwnd = tp->snd_ssthresh;
3970 if ((rack->rack_no_prr == 0) &&
3971 (rack->r_ctl.rc_prr_sndcnt > 0)) {
3972 /* Suck the next prr cnt back into cwnd */
3973 tp->snd_cwnd += rack->r_ctl.rc_prr_sndcnt;
3974 rack->r_ctl.rc_prr_sndcnt = 0;
3975 rack_log_to_prr(rack, 1, 0);
3977 rack_log_to_prr(rack, 14, orig_cwnd);
3978 tp->snd_recover = tp->snd_una;
3979 EXIT_RECOVERY(tp->t_flags);
3983 rack_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
3985 struct tcp_rack *rack;
3987 INP_WLOCK_ASSERT(tp->t_inpcb);
3989 rack = (struct tcp_rack *)tp->t_fb_ptr;
3992 tp->t_flags &= ~TF_WASFRECOVERY;
3993 tp->t_flags &= ~TF_WASCRECOVERY;
3994 if (!IN_FASTRECOVERY(tp->t_flags)) {
3995 rack->r_ctl.rc_prr_delivered = 0;
3996 rack->r_ctl.rc_prr_out = 0;
3997 if (rack->rack_no_prr == 0) {
3998 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
3999 rack_log_to_prr(rack, 2, 0);
4001 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una;
4002 tp->snd_recover = tp->snd_max;
4003 if (tp->t_flags2 & TF2_ECN_PERMIT)
4004 tp->t_flags2 |= TF2_ECN_SND_CWR;
4008 if (!IN_CONGRECOVERY(tp->t_flags) ||
4010 * Allow ECN reaction on ACK to CWR, if
4011 * that data segment was also CE marked.
4013 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
4014 EXIT_CONGRECOVERY(tp->t_flags);
4015 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd);
4016 tp->snd_recover = tp->snd_max + 1;
4017 if (tp->t_flags2 & TF2_ECN_PERMIT)
4018 tp->t_flags2 |= TF2_ECN_SND_CWR;
4023 tp->t_bytes_acked = 0;
4024 EXIT_RECOVERY(tp->t_flags);
4025 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 /
4026 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp);
4027 tp->snd_cwnd = ctf_fixed_maxseg(tp);
4028 if (tp->t_flags2 & TF2_ECN_PERMIT)
4029 tp->t_flags2 |= TF2_ECN_SND_CWR;
4032 KMOD_TCPSTAT_INC(tcps_sndrexmitbad);
4033 /* RTO was unnecessary, so reset everything. */
4034 tp->snd_cwnd = tp->snd_cwnd_prev;
4035 tp->snd_ssthresh = tp->snd_ssthresh_prev;
4036 tp->snd_recover = tp->snd_recover_prev;
4037 if (tp->t_flags & TF_WASFRECOVERY) {
4038 ENTER_FASTRECOVERY(tp->t_flags);
4039 tp->t_flags &= ~TF_WASFRECOVERY;
4041 if (tp->t_flags & TF_WASCRECOVERY) {
4042 ENTER_CONGRECOVERY(tp->t_flags);
4043 tp->t_flags &= ~TF_WASCRECOVERY;
4045 tp->snd_nxt = tp->snd_max;
4046 tp->t_badrxtwin = 0;
4050 * If we are below our max rtt, don't
4051 * signal the CC control to change things.
4052 * Instead set it up so that we are in
4053 * recovery but not going to back off.
4056 if (rack->rc_highly_buffered) {
4058 * Do we use the higher rtt for
4059 * our threshold to not back off (like CDG)?
4061 uint32_t rtt_mul, rtt_div;
4063 if (rack_use_max_for_nobackoff) {
4064 rtt_mul = (rack_gp_rtt_maxmul - 1);
4067 rtt_mul = rack_gp_rtt_minmul;
4068 rtt_div = max(rack_gp_rtt_mindiv , 1);
4070 if (rack->r_ctl.rc_gp_srtt <= (rack->r_ctl.rc_lowest_us_rtt +
4071 ((rack->r_ctl.rc_lowest_us_rtt * rtt_mul) /
4073 /* below our min threshold */
4074 rack->rc_not_backing_off = 1;
4075 ENTER_RECOVERY(rack->rc_tp->t_flags);
4076 rack_log_rtt_shrinks(rack, 0,
4079 RACK_RTTS_NOBACKOFF);
4083 rack->rc_not_backing_off = 0;
4084 if (CC_ALGO(tp)->cong_signal != NULL) {
4086 tp->ccv->curack = th->th_ack;
4087 CC_ALGO(tp)->cong_signal(tp->ccv, type);
4092 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp)
4096 INP_WLOCK_ASSERT(tp->t_inpcb);
4098 #ifdef NETFLIX_STATS
4099 KMOD_TCPSTAT_INC(tcps_idle_restarts);
4100 if (tp->t_state == TCPS_ESTABLISHED)
4101 KMOD_TCPSTAT_INC(tcps_idle_estrestarts);
4103 if (CC_ALGO(tp)->after_idle != NULL)
4104 CC_ALGO(tp)->after_idle(tp->ccv);
4106 if (tp->snd_cwnd == 1)
4107 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */
4109 i_cwnd = rc_init_window(rack);
4112 * Being idle is no different than the initial window. If the cc
4113 * clamps it down below the initial window, raise it to the initial window.
4116 if (tp->snd_cwnd < i_cwnd) {
4117 tp->snd_cwnd = i_cwnd;
4122 * Indicate whether this ack should be delayed. We can delay the ack if
4123 * the following conditions are met:
4124 * - There is no delayed ack timer in progress.
4125 * - Our last ack wasn't a 0-sized window. We never want to delay
4126 * the ack that opens up a 0-sized window.
4127 * - LRO wasn't used for this segment. We make sure by checking that the
4128 * segment size is not larger than the MSS.
4129 * - Delayed acks are enabled or this is a half-synchronized T/TCP connection.
4132 #define DELAY_ACK(tp, tlen) \
4133 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \
4134 ((tp->t_flags & TF_DELACK) == 0) && \
4135 (tlen <= tp->t_maxseg) && \
4136 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
4138 static struct rack_sendmap *
4139 rack_find_lowest_rsm(struct tcp_rack *rack)
4141 struct rack_sendmap *rsm;
4144 * Walk the time-order transmitted list looking for an rsm that is
4145 * not acked. This will be the one that was sent the longest time
4146 * ago that is still outstanding.
4148 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
4149 if (rsm->r_flags & RACK_ACKED) {
4158 static struct rack_sendmap *
4159 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
4161 struct rack_sendmap *prsm;
4164 * Walk the sequence order list backward until we arrive at
4165 * the highest seq not acked. In theory when this is called it
4166 * should be the last segment (which it was not).
4168 counter_u64_add(rack_find_high, 1);
4170 RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) {
4171 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
4180 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
4186 * lro is the flag we use to determine if we have seen reordering.
4187 * If it gets set we have seen reordering. The reorder logic
4188 * works in one of two ways:
4190 * If reorder-fade is configured, then we track the last time we saw
4191 * re-ordering occur. If we reach the point where enough time has
4192 * passed we no longer consider reordering to be occurring.
4194 * Or if reorder-fade is 0, then once we see reordering we consider
4195 * the connection to always be subject to reordering and just set lro to 1.
4198 * In the end if lro is non-zero we add the extra time for reordering in.
4203 if (rack->r_ctl.rc_reorder_ts) {
4204 if (rack->r_ctl.rc_reorder_fade) {
4205 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
4206 lro = cts - rack->r_ctl.rc_reorder_ts;
4209 * No time has passed since the last
4210 * reorder, mark it as reordering.
4215 /* Negative time? */
4218 if (lro > rack->r_ctl.rc_reorder_fade) {
4219 /* Turn off reordering seen too */
4220 rack->r_ctl.rc_reorder_ts = 0;
4224 /* Reordering does not fade */
4230 thresh = srtt + rack->r_ctl.rc_pkt_delay;
4232 /* It must be set, if not you get 1/4 rtt */
4233 if (rack->r_ctl.rc_reorder_shift)
4234 thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
4236 thresh += (srtt >> 2);
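/*
 * Example (hypothetical values): an srtt of 100ms, rc_pkt_delay of
 * 1ms, reordering seen and a reorder shift of 2 gives
 * thresh = 100 + 1 + (100 >> 2) = 126ms, before the RTO clamps below
 * are applied.
 */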
4240 /* We don't let the rack timeout be above a RTO */
4241 if (thresh > TICKS_2_MSEC(rack->rc_tp->t_rxtcur)) {
4242 thresh = TICKS_2_MSEC(rack->rc_tp->t_rxtcur);
4244 /* And we don't want it above the RTO max either */
4245 if (thresh > rack_rto_max) {
4246 thresh = rack_rto_max;
4252 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
4253 struct rack_sendmap *rsm, uint32_t srtt)
4255 struct rack_sendmap *prsm;
4256 uint32_t thresh, len;
4261 if (rack->r_ctl.rc_tlp_threshold)
4262 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold);
4264 thresh = (srtt * 2);
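/*
 * I.e. (threshold value hypothetical): with rc_tlp_threshold set to 2
 * the base TLP wait is srtt + srtt/2 = 1.5 * srtt; with it unset we
 * fall back to a flat 2 * srtt before the per-case adjustments below.
 */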
4266 /* Get the previous sent packet, if any */
4267 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
4268 counter_u64_add(rack_enter_tlp_calc, 1);
4269 len = rsm->r_end - rsm->r_start;
4270 if (rack->rack_tlp_threshold_use == TLP_USE_ID) {
4271 /* Exactly like the ID */
4272 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) {
4273 uint32_t alt_thresh;
4275 * Compensate for delayed-ack with the d-ack time.
4277 counter_u64_add(rack_used_tlpmethod, 1);
4278 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
4279 if (alt_thresh > thresh)
4280 thresh = alt_thresh;
4282 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) {
4284 prsm = TAILQ_PREV(rsm, rack_head, r_tnext);
4285 if (prsm && (len <= segsiz)) {
4287 * Two packets outstanding, thresh should be (2*srtt) +
4288 * possible inter-packet delay (if any).
4290 uint32_t inter_gap = 0;
4293 counter_u64_add(rack_used_tlpmethod, 1);
4294 idx = rsm->r_rtr_cnt - 1;
4295 nidx = prsm->r_rtr_cnt - 1;
4296 if (TSTMP_GEQ(rsm->r_tim_lastsent[nidx], prsm->r_tim_lastsent[idx])) {
4297 /* Yes it was sent later (or at the same time) */
4298 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx];
4300 thresh += inter_gap;
4301 } else if (len <= segsiz) {
4303 * Possibly compensate for delayed-ack.
4305 uint32_t alt_thresh;
4307 counter_u64_add(rack_used_tlpmethod2, 1);
4308 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
4309 if (alt_thresh > thresh)
4310 thresh = alt_thresh;
4312 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) {
4314 if (len <= segsiz) {
4315 uint32_t alt_thresh;
4317 * Compensate for delayed-ack with the d-ack time.
4319 counter_u64_add(rack_used_tlpmethod, 1);
4320 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
4321 if (alt_thresh > thresh)
4322 thresh = alt_thresh;
4325 /* Not above an RTO */
4326 if (thresh > TICKS_2_MSEC(tp->t_rxtcur)) {
4327 thresh = TICKS_2_MSEC(tp->t_rxtcur);
4329 /* Not above a RTO max */
4330 if (thresh > rack_rto_max) {
4331 thresh = rack_rto_max;
4333 /* Apply user supplied min TLP */
4334 if (thresh < rack_tlp_min) {
4335 thresh = rack_tlp_min;
4341 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack)
4344 * We want the rack_rtt which is the
4345 * last rtt we measured. However if that
4346 * does not exist we fall back to the srtt (which
4347 * we probably will never do) and then as a last
4348 * resort we use RACK_INITIAL_RTO if no srtt is yet set.
4351 if (rack->rc_rack_rtt)
4352 return(rack->rc_rack_rtt);
4353 else if (tp->t_srtt == 0)
4354 return(RACK_INITIAL_RTO);
4355 return (TICKS_2_MSEC(tp->t_srtt >> TCP_RTT_SHIFT));
4358 static struct rack_sendmap *
4359 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused)
4362 * Check to see that we don't need to fall into recovery. We will
4363 * need to do so if our oldest transmit is past the time we should have had an ack.
4366 struct tcp_rack *rack;
4367 struct rack_sendmap *rsm;
4369 uint32_t srtt, thresh;
4371 rack = (struct tcp_rack *)tp->t_fb_ptr;
4372 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) {
4375 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
4379 if (rsm->r_flags & RACK_ACKED) {
4380 rsm = rack_find_lowest_rsm(rack);
4384 idx = rsm->r_rtr_cnt - 1;
4385 srtt = rack_grab_rtt(tp, rack);
4386 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
4387 if (TSTMP_LT(tsused, rsm->r_tim_lastsent[idx])) {
4390 if ((tsused - rsm->r_tim_lastsent[idx]) < thresh) {
4393 /* Ok if we reach here we are overdue and this guy can be sent */
4394 if (IN_RECOVERY(tp->t_flags) == 0) {
4396 * For the one that enters us into recovery record undo
4399 rack->r_ctl.rc_rsm_start = rsm->r_start;
4400 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
4401 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
4403 rack_cong_signal(tp, NULL, CC_NDUPACK);
4408 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack)
4414 t = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT) + ((tp->t_rttvar * 4) >> TCP_RTT_SHIFT));
4415 TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
4416 rack_persist_min, rack_persist_max);
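/*
 * Rough example (timings hypothetical): an srtt of 100ms with an
 * rttvar of 25ms makes t about 200ms; on the third backoff (an 8x
 * multiplier from tcp_backoff[]) the raw persist timeout is ~1.6s,
 * which TCPT_RANGESET() then pins between rack_persist_min and
 * rack_persist_max.
 */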
4417 if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
4419 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT;
4420 ret_val = (uint32_t)tt;
4425 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack)
4428 * Start the FR timer, we do this based on getting the first one in
4429 * the rc_tmap. Note that if it's NULL we must stop the timer. In all
4430 * events we need to stop the running timer (if it's running) before
4431 * starting the new one.
4433 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse;
4436 int32_t is_tlp_timer = 0;
4437 struct rack_sendmap *rsm;
4439 if (rack->t_timers_stopped) {
4440 /* All timers have been stopped, none are to run */
4443 if (rack->rc_in_persist) {
4444 /* We can't start any timer in persists */
4445 return (rack_get_persists_timer_val(tp, rack));
4447 rack->rc_on_min_to = 0;
4448 if ((tp->t_state < TCPS_ESTABLISHED) ||
4449 ((tp->t_flags & TF_SACK_PERMIT) == 0))
4451 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
4452 if ((rsm == NULL) || sup_rack) {
4453 /* Nothing on the send map */
4455 time_since_sent = 0;
4456 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
4458 idx = rsm->r_rtr_cnt - 1;
4459 if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], rack->r_ctl.rc_tlp_rxt_last_time))
4460 tstmp_touse = rsm->r_tim_lastsent[idx];
4462 tstmp_touse = rack->r_ctl.rc_tlp_rxt_last_time;
4463 if (TSTMP_GT(cts, tstmp_touse))
4464 time_since_sent = cts - tstmp_touse;
4466 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) {
4467 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT;
4468 to = TICKS_2_MSEC(tp->t_rxtcur);
4469 if (to > time_since_sent)
4470 to -= time_since_sent;
4472 to = rack->r_ctl.rc_min_to;
4479 if (rsm->r_flags & RACK_ACKED) {
4480 rsm = rack_find_lowest_rsm(rack);
4486 if (rack->sack_attack_disable) {
4488 * We don't want to do
4489 * any TLP's if you are an attacker.
4490 * Though if you are doing what
4491 * is expected you may still have
4492 * SACK-PASSED marks.
4496 /* Convert from ms to usecs */
4497 if ((rsm->r_flags & RACK_SACK_PASSED) || (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
4498 if ((tp->t_flags & TF_SENTFIN) &&
4499 ((tp->snd_max - tp->snd_una) == 1) &&
4500 (rsm->r_flags & RACK_HAS_FIN)) {
4502 * We don't start a rack timer if all we have is a FIN outstanding.
4507 if ((rack->use_rack_rr == 0) &&
4508 (IN_RECOVERY(tp->t_flags)) &&
4509 (rack->rack_no_prr == 0) &&
4510 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) {
4512 * We are not cheating, in recovery and
4513 * not enough ack's to yet get our next
4514 * retransmission out.
4516 * Note that classified attackers do not
4517 * get to use the rack-cheat.
4521 srtt = rack_grab_rtt(tp, rack);
4522 thresh = rack_calc_thresh_rack(rack, srtt, cts);
4523 idx = rsm->r_rtr_cnt - 1;
4524 exp = rsm->r_tim_lastsent[idx] + thresh;
4525 if (SEQ_GEQ(exp, cts)) {
4527 if (to < rack->r_ctl.rc_min_to) {
4528 to = rack->r_ctl.rc_min_to;
4529 if (rack->r_rr_config == 3)
4530 rack->rc_on_min_to = 1;
4533 to = rack->r_ctl.rc_min_to;
4534 if (rack->r_rr_config == 3)
4535 rack->rc_on_min_to = 1;
4538 /* Ok we need to do a TLP not RACK */
4540 if ((rack->rc_tlp_in_progress != 0) &&
4541 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) {
4543 * The previous send was a TLP and we have sent
4544 * N TLP's without sending new data.
4548 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
4550 /* We found no rsm to TLP with. */
4553 if (rsm->r_flags & RACK_HAS_FIN) {
4554 /* If it's a FIN we don't do TLP */
4558 idx = rsm->r_rtr_cnt - 1;
4559 time_since_sent = 0;
4560 if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], rack->r_ctl.rc_tlp_rxt_last_time))
4561 tstmp_touse = rsm->r_tim_lastsent[idx];
4563 tstmp_touse = rack->r_ctl.rc_tlp_rxt_last_time;
4564 if (TSTMP_GT(cts, tstmp_touse))
4565 time_since_sent = cts - tstmp_touse;
4568 srtt_cur = (tp->t_srtt >> TCP_RTT_SHIFT);
4569 srtt = TICKS_2_MSEC(srtt_cur);
4571 srtt = RACK_INITIAL_RTO;
4573 * If the SRTT is not keeping up and the
4574 * rack RTT has spiked we want to use
4575 * the last RTT not the smoothed one.
4577 if (rack_tlp_use_greater && (srtt < rack_grab_rtt(tp, rack)))
4578 srtt = rack_grab_rtt(tp, rack);
4579 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt);
4580 if (thresh > time_since_sent)
4581 to = thresh - time_since_sent;
4583 to = rack->r_ctl.rc_min_to;
4584 rack_log_alt_to_to_cancel(rack,
4586 time_since_sent, /* flex2 */
4587 tstmp_touse, /* flex3 */
4588 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */
4589 rsm->r_tim_lastsent[idx],
4593 if (to > TCPTV_REXMTMAX) {
4595 * If the TLP time works out to larger than the max
4596 * RTO let's not do TLP.. just RTO.
4601 if (is_tlp_timer == 0) {
4602 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK;
4604 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP;
4612 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
4614 if (rack->rc_in_persist == 0) {
4615 if (tp->t_flags & TF_GPUTINPROG) {
4617 * Stop the goodput now, the calling of the
4618 * measurement function clears the flag.
4620 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__);
4622 #ifdef NETFLIX_SHARED_CWND
4623 if (rack->r_ctl.rc_scw) {
4624 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
4625 rack->rack_scwnd_is_idle = 1;
4628 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
4629 if (rack->r_ctl.rc_went_idle_time == 0)
4630 rack->r_ctl.rc_went_idle_time = 1;
4631 rack_timer_cancel(tp, rack, cts, __LINE__);
4633 rack->rc_in_persist = 1;
4638 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
4640 if (rack->rc_inp->inp_in_hpts) {
4641 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
4642 rack->r_ctl.rc_hpts_flags = 0;
4644 #ifdef NETFLIX_SHARED_CWND
4645 if (rack->r_ctl.rc_scw) {
4646 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
4647 rack->rack_scwnd_is_idle = 0;
4650 if (rack->rc_gp_dyn_mul &&
4651 (rack->use_fixed_rate == 0) &&
4652 (rack->rc_always_pace)) {
4654 * Do we count this as if a probe-rtt just completed?
4657 uint32_t time_idle, idle_min;
4659 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time;
4660 idle_min = rack_min_probertt_hold;
4661 if (rack_probertt_gpsrtt_cnt_div) {
4663 extra = (uint64_t)rack->r_ctl.rc_gp_srtt *
4664 (uint64_t)rack_probertt_gpsrtt_cnt_mul;
4665 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div;
4666 idle_min += (uint32_t)extra;
4668 if (time_idle >= idle_min) {
4669 /* Yes, we count it as a probe-rtt. */
4672 us_cts = tcp_get_usecs(NULL);
4673 if (rack->in_probe_rtt == 0) {
4674 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
4675 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
4676 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
4677 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
4679 rack_exit_probertt(rack, us_cts);
4683 rack->rc_in_persist = 0;
4684 rack->r_ctl.rc_went_idle_time = 0;
4686 rack->r_ctl.rc_agg_delayed = 0;
4689 rack->r_ctl.rc_agg_early = 0;
4693 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts,
4694 struct hpts_diag *diag, struct timeval *tv)
4696 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
4697 union tcp_log_stackspecific log;
4699 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
4700 log.u_bbr.flex1 = diag->p_nxt_slot;
4701 log.u_bbr.flex2 = diag->p_cur_slot;
4702 log.u_bbr.flex3 = diag->slot_req;
4703 log.u_bbr.flex4 = diag->inp_hptsslot;
4704 log.u_bbr.flex5 = diag->slot_remaining;
4705 log.u_bbr.flex6 = diag->need_new_to;
4706 log.u_bbr.flex7 = diag->p_hpts_active;
4707 log.u_bbr.flex8 = diag->p_on_min_sleep;
4708 /* Hijack other fields as needed */
4709 log.u_bbr.epoch = diag->have_slept;
4710 log.u_bbr.lt_epoch = diag->yet_to_sleep;
4711 log.u_bbr.pkts_out = diag->co_ret;
4712 log.u_bbr.applimited = diag->hpts_sleep_time;
4713 log.u_bbr.delivered = diag->p_prev_slot;
4714 log.u_bbr.inflight = diag->p_runningtick;
4715 log.u_bbr.bw_inuse = diag->wheel_tick;
4716 log.u_bbr.rttProp = diag->wheel_cts;
4717 log.u_bbr.timeStamp = cts;
4718 log.u_bbr.delRate = diag->maxticks;
4719 log.u_bbr.cur_del_rate = diag->p_curtick;
4720 log.u_bbr.cur_del_rate <<= 32;
4721 log.u_bbr.cur_del_rate |= diag->p_lasttick;
4722 TCP_LOG_EVENTP(rack->rc_tp, NULL,
4723 &rack->rc_inp->inp_socket->so_rcv,
4724 &rack->rc_inp->inp_socket->so_snd,
4725 BBR_LOG_HPTSDIAG, 0,
4726 0, &log, false, tv);
4732 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
4733 int32_t slot, uint32_t tot_len_this_send, int sup_rack)
4735 struct hpts_diag diag;
4738 uint32_t delayed_ack = 0;
4739 uint32_t hpts_timeout;
4745 if ((tp->t_state == TCPS_CLOSED) ||
4746 (tp->t_state == TCPS_LISTEN)) {
4749 if (inp->inp_in_hpts) {
4750 /* Already on the pacer */
4753 stopped = rack->rc_tmr_stopped;
4754 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
4755 left = rack->r_ctl.rc_timer_exp - cts;
4757 rack->r_ctl.rc_timer_exp = 0;
4758 rack->r_ctl.rc_hpts_flags = 0;
4759 us_cts = tcp_get_usecs(&tv);
4760 /* Now early/late accounting */
4761 if (rack->r_early) {
4763 * We have an early carry over set,
4764 * we can always add more time so we
4765 * can always make this compensation.
4767 slot += rack->r_ctl.rc_agg_early;
4769 rack->r_ctl.rc_agg_early = 0;
4773 * This is harder, we can
4774 * compensate some but it
4775 * really depends on what
4776 * the current pacing time is.
4778 if (rack->r_ctl.rc_agg_delayed >= slot) {
4780 * We can't compensate for it all.
4781 * And we have to have some time
4782 * on the clock. We always have a min
4783 * 10 slots (10 x 10 i.e. 100 usecs).
4785 if (slot <= HPTS_TICKS_PER_USEC) {
4787 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_USEC - slot);
4788 slot = HPTS_TICKS_PER_USEC;
4790 /* We take off some */
4791 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_USEC);
4792 slot = HPTS_TICKS_PER_USEC;
4795 slot -= rack->r_ctl.rc_agg_delayed;
4796 rack->r_ctl.rc_agg_delayed = 0;
4797 /* Make sure we have 100 useconds at minimum */
4798 if (slot < HPTS_TICKS_PER_USEC) {
4799 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_USEC - slot;
4800 slot = HPTS_TICKS_PER_USEC;
4802 if (rack->r_ctl.rc_agg_delayed == 0)
4807 /* We are pacing too */
4808 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
4810 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack);
4811 #ifdef NETFLIX_EXP_DETECTION
4812 if (rack->sack_attack_disable &&
4813 (slot < tcp_sad_pacing_interval)) {
4815 * We have a potential attacker on
4816 * the line. We have possibly some
4817 * (or no) pacing time set. We want to
4818 * slow down the processing of sacks by some
4819 * amount (if it is an attacker). Set the default
4820 * slot for attackers in place (unless the original
4821 * interval is longer). It's stored in
4822 * micro-seconds, so let's convert to msecs.
4824 slot = tcp_sad_pacing_interval;
4827 if (tp->t_flags & TF_DELACK) {
4828 delayed_ack = TICKS_2_MSEC(tcp_delacktime);
4829 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK;
4831 if (delayed_ack && ((hpts_timeout == 0) ||
4832 (delayed_ack < hpts_timeout)))
4833 hpts_timeout = delayed_ack;
4835 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
4837 * If no timers are going to run and we will fall off the hptsi
4838 * wheel, we resort to a keep-alive timer if it's configured.
4840 if ((hpts_timeout == 0) &&
4842 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
4843 (tp->t_state <= TCPS_CLOSING)) {
4845 * Ok we have no timer (persists, rack, tlp, rxt or
4846 * del-ack), we don't have segments being paced. So
4847 * all that is left is the keepalive timer.
4849 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
4850 /* Get the established keep-alive time */
4851 hpts_timeout = TP_KEEPIDLE(tp);
4853 /* Get the initial setup keep-alive time */
4854 hpts_timeout = TP_KEEPINIT(tp);
4856 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP;
4857 if (rack->in_probe_rtt) {
4859 * We want to instead not wake up a long time from
4860 * now but to wake up about the time we would
4861 * exit probe-rtt and initiate a keep-alive ack.
4862 * This will get us out of probe-rtt and update our min-rtt.
4865 hpts_timeout = (rack_min_probertt_hold / HPTS_USEC_IN_MSEC);
4869 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) ==
4870 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) {
4872 * RACK, TLP, persists and RXT timers all are restartable
4873 * based on actions input .. i.e we received a packet (ack
4874 * or sack) and that changes things (rw, or snd_una etc).
4875 * Thus we can restart them with a new value. For
4876 * keep-alive, delayed_ack we keep track of what was left
4877 * and restart the timer with a smaller value.
4879 if (left < hpts_timeout)
4880 hpts_timeout = left;
4884 * Hack alert for now we can't time-out over 2,147,483
4885 * seconds (a bit more than 596 hours), which is probably ok
4888 if (hpts_timeout > 0x7ffffffe)
4889 hpts_timeout = 0x7ffffffe;
4890 rack->r_ctl.rc_timer_exp = cts + hpts_timeout;
4892 if ((rack->rc_gp_filled == 0) &&
4893 (hpts_timeout < slot) &&
4894 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) {
4896 * We have no good estimate yet for the
4897 * old clunky burst mitigation or the
4898 * real pacing. And the tlp or rxt is smaller
4899 * than the pacing calculation. Let's not
4900 * pace that long since we know the calculation
4901 * so far is not accurate.
4903 slot = hpts_timeout;
4905 rack->r_ctl.last_pacing_time = slot;
4907 rack->r_ctl.rc_last_output_to = us_cts + slot;
4908 if (rack->rc_always_pace || rack->r_mbuf_queue) {
4909 if ((rack->rc_gp_filled == 0) ||
4910 rack->pacing_longer_than_rtt) {
4911 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
4913 inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
4914 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) &&
4915 (rack->r_rr_config != 3))
4916 inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
4918 inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
4921 if ((rack->use_rack_rr) &&
4922 (rack->r_rr_config < 2) &&
4923 ((hpts_timeout) && ((hpts_timeout * HPTS_USEC_IN_MSEC) < slot))) {
4925 * Arrange for the hpts to kick back in after the
4926 * t-o if the t-o does not cause a send.
4928 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout),
4930 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
4931 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
4933 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot),
4935 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
4936 rack_log_to_start(rack, cts, hpts_timeout, slot, 1);
4938 } else if (hpts_timeout) {
4939 if (rack->rc_always_pace || rack->r_mbuf_queue) {
4940 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) {
4941 /* For a rack timer, don't wake us */
4942 inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
4943 if (rack->r_rr_config != 3)
4944 inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
4946 inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
4948 /* All other timers wake us up */
4949 inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
4950 inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
4953 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout),
4955 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
4956 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
4958 /* No timer starting */
4960 if (SEQ_GT(tp->snd_max, tp->snd_una)) {
4961 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?",
4962 tp, rack, tot_len_this_send, cts, slot, hpts_timeout);
4966 rack->rc_tmr_stopped = 0;
4968 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv);
4972 * RACK Timer, here we simply do logging and housekeeping.
4973 * The normal rack_output() function will call the
4974 * appropriate thing to check if we need to do a RACK retransmit.
4975 * We return 1, saying don't proceed with rack_output only
4976 * when all timers have been stopped (destroyed PCB?).
4979 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
4982 * This timer simply provides an internal trigger to send out data.
4983 * The check_recovery_mode call will see if there are needed
4984 * retransmissions, if so we will enter fast-recovery. The output
4985 * call may or may not do the same thing depending on sysctl settings.
4988 struct rack_sendmap *rsm;
4991 if (tp->t_timers->tt_flags & TT_STOPPED) {
4994 recovery = IN_RECOVERY(tp->t_flags);
4995 counter_u64_add(rack_to_tot, 1);
4996 if (rack->r_state && (rack->r_state != tp->t_state))
4997 rack_set_state(tp, rack);
4998 rack->rc_on_min_to = 0;
4999 rsm = rack_check_recovery_mode(tp, cts);
5000 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm);
5004 rack->r_ctl.rc_resend = rsm;
5005 if (rack->use_rack_rr) {
5007 * Don't accumulate extra pacing delay;
5008 * we are allowing the rack timer to
5009 * over-ride pacing i.e. rrr takes precedence
5010 * if the pacing interval is longer than the rrr
5011 * time (in other words we get the min pacing
5012 * time versus rrr pacing time).
5014 rack->r_timer_override = 1;
5015 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
5017 rtt = rack->rc_rack_rtt;
5020 if (rack->rack_no_prr == 0) {
5021 if ((recovery == 0) &&
5022 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) {
5024 * The rack-timeout that enters us into recovery
5025 * will force out one MSS and set us up so that we
5026 * can do one more send in 2*rtt (transitioning the
5027 * rack timeout into a rack-tlp).
5029 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
5030 rack->r_timer_override = 1;
5031 rack_log_to_prr(rack, 3, 0);
5032 } else if ((rack->r_ctl.rc_prr_sndcnt < (rsm->r_end - rsm->r_start)) &&
5033 rack->use_rack_rr) {
5035 * When a rack timer goes, if the rack rr is
5036 * on, arrange it so we can send a full segment
5037 * overriding prr (though we pay a price for this
5038 * for future new sends).
5040 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
5041 rack_log_to_prr(rack, 4, 0);
5045 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK;
5047 /* restart a timer and return 1 */
5048 rack_start_hpts_timer(rack, tp, cts,
5055 static __inline void
5056 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm,
5057 struct rack_sendmap *rsm, uint32_t start)
5061 nrsm->r_start = start;
5062 nrsm->r_end = rsm->r_end;
5063 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
5064 nrsm->r_flags = rsm->r_flags;
5065 nrsm->r_dupack = rsm->r_dupack;
5066 nrsm->usec_orig_send = rsm->usec_orig_send;
5067 nrsm->r_rtr_bytes = 0;
5068 rsm->r_end = nrsm->r_start;
5069 nrsm->r_just_ret = rsm->r_just_ret;
5070 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
5071 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
5075 static struct rack_sendmap *
5076 rack_merge_rsm(struct tcp_rack *rack,
5077 struct rack_sendmap *l_rsm,
5078 struct rack_sendmap *r_rsm)
5081 * We are merging two ack'd RSM's,
5082 * the l_rsm is on the left (lower seq
5083 * values) and the r_rsm is on the right
5084 * (higher seq value). The simplest way
5085 * to merge these is to move the right
5086 * one into the left. I don't think there
5087 * is any reason we need to try to find
5088 * the oldest (or last oldest retransmitted).
5090 struct rack_sendmap *rm;
5092 l_rsm->r_end = r_rsm->r_end;
5093 if (l_rsm->r_dupack < r_rsm->r_dupack)
5094 l_rsm->r_dupack = r_rsm->r_dupack;
5095 if (r_rsm->r_rtr_bytes)
5096 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes;
5097 if (r_rsm->r_in_tmap) {
5098 /* This really should not happen */
5099 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext);
5100 r_rsm->r_in_tmap = 0;
5104 if (r_rsm->r_flags & RACK_HAS_FIN)
5105 l_rsm->r_flags |= RACK_HAS_FIN;
5106 if (r_rsm->r_flags & RACK_TLP)
5107 l_rsm->r_flags |= RACK_TLP;
5108 if (r_rsm->r_flags & RACK_RWND_COLLAPSED)
5109 l_rsm->r_flags |= RACK_RWND_COLLAPSED;
5110 if ((r_rsm->r_flags & RACK_APP_LIMITED) &&
5111 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) {
5113 * If both are app-limited then let the
5114 * free lower the count. If right is app
5115 * limited and left is not, transfer.
5117 l_rsm->r_flags |= RACK_APP_LIMITED;
5118 r_rsm->r_flags &= ~RACK_APP_LIMITED;
5119 if (r_rsm == rack->r_ctl.rc_first_appl)
5120 rack->r_ctl.rc_first_appl = l_rsm;
5122 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm);
5125 panic("removing head in rack:%p rsm:%p rm:%p",
5129 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) {
5130 /* Transfer the split limit to the map we free */
5131 r_rsm->r_limit_type = l_rsm->r_limit_type;
5132 l_rsm->r_limit_type = 0;
5134 rack_free(rack, r_rsm);
5139 * TLP Timer, here we simply set up what segment we want to
5140 * have the TLP expire on, the normal rack_output() will then send it out.
5143 * We return 1, saying don't proceed with rack_output only
5144 * when all timers have been stopped (destroyed PCB?).
5147 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5152 struct rack_sendmap *rsm = NULL;
5153 struct rack_sendmap *insret;
5155 uint32_t amm, old_prr_snd = 0;
5156 uint32_t out, avail;
5157 int collapsed_win = 0;
5159 if (tp->t_timers->tt_flags & TT_STOPPED) {
5162 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
5163 /* It's not time yet */
5166 if (ctf_progress_timeout_check(tp, true)) {
5167 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
5168 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
5172 * A TLP timer has expired. We have been idle for 2 rtts. So we now
5173 * need to figure out how to force a full MSS segment out.
5175 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL);
5176 counter_u64_add(rack_tlp_tot, 1);
5177 if (rack->r_state && (rack->r_state != tp->t_state))
5178 rack_set_state(tp, rack);
5179 so = tp->t_inpcb->inp_socket;
5180 avail = sbavail(&so->so_snd);
5181 out = tp->snd_max - tp->snd_una;
5182 if (out > tp->snd_wnd) {
5183 /* special case, we need a retransmission */
5188 * Check our send oldest always settings, and if
5189 * there is an oldest to send jump to the need_retran.
5191 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0))
5195 /* New data is available */
5197 if (amm > ctf_fixed_maxseg(tp)) {
5198 amm = ctf_fixed_maxseg(tp);
5199 if ((amm + out) > tp->snd_wnd) {
5200 /* We are rwnd limited */
5203 } else if (amm < ctf_fixed_maxseg(tp)) {
5204 /* not enough to fill a MTU */
5207 if (IN_RECOVERY(tp->t_flags)) {
5209 if (rack->rack_no_prr == 0) {
5210 old_prr_snd = rack->r_ctl.rc_prr_sndcnt;
5211 if (out + amm <= tp->snd_wnd) {
5212 rack->r_ctl.rc_prr_sndcnt = amm;
5213 rack_log_to_prr(rack, 4, 0);
5218 /* Set the send-new override */
5219 if (out + amm <= tp->snd_wnd)
5220 rack->r_ctl.rc_tlp_new_data = amm;
5224 rack->r_ctl.rc_tlpsend = NULL;
5225 counter_u64_add(rack_tlp_newdata, 1);
5230 * Ok we need to arrange the last un-acked segment to be re-sent, or
5231 * optionally the first un-acked segment.
5233 if (collapsed_win == 0) {
5234 if (rack_always_send_oldest)
5235 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5237 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
5238 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) {
5239 rsm = rack_find_high_nonack(rack, rsm);
5243 counter_u64_add(rack_tlp_does_nada, 1);
5245 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
5251		 * We must find the last segment
5252		 * that was acceptable to the client.
5254 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
5255 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) {
5261 /* None? if so send the first */
5262 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
5264 counter_u64_add(rack_tlp_does_nada, 1);
5266 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
5272 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) {
5274		 * We need to split this, the last segment, in two.
5276 struct rack_sendmap *nrsm;
5278 nrsm = rack_alloc_full_limit(rack);
5281 * No memory to split, we will just exit and punt
5282 * off to the RXT timer.
5284 counter_u64_add(rack_tlp_does_nada, 1);
5287 rack_clone_rsm(rack, nrsm, rsm,
5288 (rsm->r_end - ctf_fixed_maxseg(tp)));
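		/*
		 * Illustrative example with hypothetical numbers: if rsm
		 * spans [1000, 4000) and the fixed maxseg is 1448 bytes,
		 * the clone point is 4000 - 1448 = 2552, so rsm keeps
		 * [1000, 2552) and nrsm becomes the trailing [2552, 4000)
		 * piece that the TLP will probe with.
		 */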
5289 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
5291 if (insret != NULL) {
5292 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
5293 nrsm, insret, rack, rsm);
5296 if (rsm->r_in_tmap) {
5297 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
5298 nrsm->r_in_tmap = 1;
5300 rsm->r_flags &= (~RACK_HAS_FIN);
5303 rack->r_ctl.rc_tlpsend = rsm;
5305 rack->r_timer_override = 1;
5306 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
5309 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
5314	 * Delayed ack Timer, here we simply need to set up the
5315 * ACK_NOW flag and remove the DELACK flag. From there
5316 * the output routine will send the ack out.
5318 * We only return 1, saying don't proceed, if all timers
5319 * are stopped (destroyed PCB?).
5322 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5324 if (tp->t_timers->tt_flags & TT_STOPPED) {
5327 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL);
5328 tp->t_flags &= ~TF_DELACK;
5329 tp->t_flags |= TF_ACKNOW;
5330 KMOD_TCPSTAT_INC(tcps_delack);
5331 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
5336	 * Persist timer, here we simply send the
5337	 * same thing as a keepalive will,
5338	 * the one byte send.
5340 * We only return 1, saying don't proceed, if all timers
5341 * are stopped (destroyed PCB?).
5344 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5346 struct tcptemp *t_template;
5352 if (tp->t_timers->tt_flags & TT_STOPPED) {
5355 if (rack->rc_in_persist == 0)
5357 if (ctf_progress_timeout_check(tp, false)) {
5358 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
5359 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
5360 tcp_set_inp_to_drop(inp, ETIMEDOUT);
5363 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
5365 * Persistence timer into zero window. Force a byte to be output, if
5368 KMOD_TCPSTAT_INC(tcps_persisttimeo);
5370 * Hack: if the peer is dead/unreachable, we do not time out if the
5371 * window is closed. After a full backoff, drop the connection if
5372 * the idle time (no responses to probes) reaches the maximum
5373 * backoff that we would use if retransmitting.
5375 if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
5376 (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
5377 ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
5378 KMOD_TCPSTAT_INC(tcps_persistdrop);
5380 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
5381 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
5384 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) &&
5385 tp->snd_una == tp->snd_max)
5386 rack_exit_persist(tp, rack, cts);
5387 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT;
5389 * If the user has closed the socket then drop a persisting
5390 * connection after a much reduced timeout.
5392 if (tp->t_state > TCPS_CLOSE_WAIT &&
5393 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
5395 KMOD_TCPSTAT_INC(tcps_persistdrop);
5396 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
5397 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
5400 t_template = tcpip_maketemplate(rack->rc_inp);
5402 /* only set it if we were answered */
5403 if (rack->forced_ack == 0) {
5404 rack->forced_ack = 1;
5405 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
5407 tcp_respond(tp, t_template->tt_ipgen,
5408 &t_template->tt_t, (struct mbuf *)NULL,
5409 tp->rcv_nxt, tp->snd_una - 1, 0);
5410 /* This sends an ack */
5411 if (tp->t_flags & TF_DELACK)
5412 tp->t_flags &= ~TF_DELACK;
5413 free(t_template, M_TEMP);
5415 if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
5418 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL);
5419 rack_start_hpts_timer(rack, tp, cts,
5425 * If a keepalive goes off, we had no other timers
5426 * happening. We always return 1 here since this
5427 * routine either drops the connection or sends
5428	 * out a segment with tcp_respond().
5431 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5433 struct tcptemp *t_template;
5436 if (tp->t_timers->tt_flags & TT_STOPPED) {
5439 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
5441 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL);
5443 * Keep-alive timer went off; send something or drop connection if
5444 * idle for too long.
5446 KMOD_TCPSTAT_INC(tcps_keeptimeo);
5447 if (tp->t_state < TCPS_ESTABLISHED)
5449 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
5450 tp->t_state <= TCPS_CLOSING) {
5451 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
5454 * Send a packet designed to force a response if the peer is
5455 * up and reachable: either an ACK if the connection is
5456 * still alive, or an RST if the peer has closed the
5457 * connection due to timeout or reboot. Using sequence
5458 * number tp->snd_una-1 causes the transmitted zero-length
5459 * segment to lie outside the receive window; by the
5460 * protocol spec, this requires the correspondent TCP to
5463 KMOD_TCPSTAT_INC(tcps_keepprobe);
5464 t_template = tcpip_maketemplate(inp);
5466 if (rack->forced_ack == 0) {
5467 rack->forced_ack = 1;
5468 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
5470 tcp_respond(tp, t_template->tt_ipgen,
5471 &t_template->tt_t, (struct mbuf *)NULL,
5472 tp->rcv_nxt, tp->snd_una - 1, 0);
5473 free(t_template, M_TEMP);
5476 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
5479 KMOD_TCPSTAT_INC(tcps_keepdrops);
5480 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
5481 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
5486 * Retransmit helper function, clear up all the ack
5487	 * flags and take care of important bookkeeping.
5490 rack_remxt_tmr(struct tcpcb *tp)
5493 * The retransmit timer went off, all sack'd blocks must be
5496 struct rack_sendmap *rsm, *trsm = NULL;
5497 struct tcp_rack *rack;
5500 rack = (struct tcp_rack *)tp->t_fb_ptr;
5501 rack_timer_cancel(tp, rack, tcp_ts_getticks(), __LINE__);
5502 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL);
5503 if (rack->r_state && (rack->r_state != tp->t_state))
5504 rack_set_state(tp, rack);
5506 * Ideally we would like to be able to
5507 * mark SACK-PASS on anything not acked here.
5508 * However, if we do that we would burst out
5509 * all that data 1ms apart. This would be unwise,
5510 * so for now we will just let the normal rxt timer
5511 * and tlp timer take care of it.
5513 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
5514 if (rsm->r_flags & RACK_ACKED) {
5517 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
5518 if (rsm->r_in_tmap == 0) {
5519 /* We must re-add it back to the tlist */
5521 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
5523 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
5529 if (rsm->r_flags & RACK_ACKED)
5530 rsm->r_flags |= RACK_WAS_ACKED;
5531 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS);
5533 /* Clear the count (we just un-acked them) */
5534 rack->r_ctl.rc_sacked = 0;
5535 rack->r_ctl.rc_agg_delayed = 0;
5537 rack->r_ctl.rc_agg_early = 0;
5539 /* Clear the tlp rtx mark */
5540 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
5541 rack->r_ctl.rc_prr_sndcnt = 0;
5542 rack_log_to_prr(rack, 6, 0);
5543 rack->r_timer_override = 1;
5547 rack_cc_conn_init(struct tcpcb *tp)
5549 struct tcp_rack *rack;
5551 rack = (struct tcp_rack *)tp->t_fb_ptr;
5554 * We want a chance to stay in slowstart as
5555 * we create a connection. TCP spec says that
5556 * initially ssthresh is infinite. For our
5557 * purposes that is the snd_wnd.
5559 if (tp->snd_ssthresh < tp->snd_wnd) {
5560 tp->snd_ssthresh = tp->snd_wnd;
5563	 * We also want to assure an IW worth of
5564	 * data can get in flight.
5566 if (rc_init_window(rack) < tp->snd_cwnd)
5567 tp->snd_cwnd = rc_init_window(rack);
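	/*
	 * Example with hypothetical values: if rc_init_window() yields
	 * 10 * MSS and the inherited snd_cwnd is 40 * MSS, snd_cwnd is
	 * pulled back to the 10 * MSS initial window here.
	 */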
5571 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
5572	 * we will set up to retransmit the lowest seq number outstanding.
5575 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5583 if (tp->t_timers->tt_flags & TT_STOPPED) {
5586 if (ctf_progress_timeout_check(tp, false)) {
5587 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
5588 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
5589 tcp_set_inp_to_drop(inp, ETIMEDOUT);
5592 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT;
5593 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
5594 (tp->snd_una == tp->snd_max)) {
5595 /* Nothing outstanding .. nothing to do */
5599 * Retransmission timer went off. Message has not been acked within
5600 * retransmit interval. Back off to a longer retransmit interval
5601 * and retransmit one segment.
5604 if ((rack->r_ctl.rc_resend == NULL) ||
5605 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) {
5607 * If the rwnd collapsed on
5608 * the one we are retransmitting
5609 * it does not count against the
5614 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) {
5615 tp->t_rxtshift = TCP_MAXRXTSHIFT;
5616 KMOD_TCPSTAT_INC(tcps_timeoutdrop);
5618 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
5619 tcp_set_inp_to_drop(rack->rc_inp,
5620 (tp->t_softerror ? (uint16_t) tp->t_softerror : ETIMEDOUT));
5623 if (tp->t_state == TCPS_SYN_SENT) {
5625 * If the SYN was retransmitted, indicate CWND to be limited
5626 * to 1 segment in cc_conn_init().
5629 } else if (tp->t_rxtshift == 1) {
5631 * first retransmit; record ssthresh and cwnd so they can be
5632 * recovered if this turns out to be a "bad" retransmit. A
5633 * retransmit is considered "bad" if an ACK for this segment
5634 * is received within RTT/2 interval; the assumption here is
5635 * that the ACK was already in flight. See "On Estimating
5636 * End-to-End Network Path Properties" by Allman and Paxson
5639 tp->snd_cwnd_prev = tp->snd_cwnd;
5640 tp->snd_ssthresh_prev = tp->snd_ssthresh;
5641 tp->snd_recover_prev = tp->snd_recover;
5642 if (IN_FASTRECOVERY(tp->t_flags))
5643 tp->t_flags |= TF_WASFRECOVERY;
5645 tp->t_flags &= ~TF_WASFRECOVERY;
5646 if (IN_CONGRECOVERY(tp->t_flags))
5647 tp->t_flags |= TF_WASCRECOVERY;
5649 tp->t_flags &= ~TF_WASCRECOVERY;
5650 tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
5651 tp->t_flags |= TF_PREVVALID;
5653 tp->t_flags &= ~TF_PREVVALID;
5654 KMOD_TCPSTAT_INC(tcps_rexmttimeo);
5655 if ((tp->t_state == TCPS_SYN_SENT) ||
5656 (tp->t_state == TCPS_SYN_RECEIVED))
5657 rexmt = MSEC_2_TICKS(RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]);
5659 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
5660 TCPT_RANGESET(tp->t_rxtcur, rexmt,
5661 max(MSEC_2_TICKS(rack_rto_min), rexmt),
5662 MSEC_2_TICKS(rack_rto_max));
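	/*
	 * Worked example with hypothetical values: if TCP_REXMTVAL(tp)
	 * is 200ms and t_rxtshift is 3, rexmt becomes 200ms *
	 * tcp_backoff[3] (1600ms with the stock doubling table), which
	 * TCPT_RANGESET then clamps between MSEC_2_TICKS(rack_rto_min)
	 * and MSEC_2_TICKS(rack_rto_max) before storing it in t_rxtcur.
	 */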
5664	 * We enter the path for PLMTUD if the connection is established or, if
5665	 * the connection is in FIN_WAIT_1; the reason for the latter is that if the
5666	 * amount of data we send is very small, we could send it in a couple
5667	 * of packets and proceed straight to FIN. In that case we won't
5668	 * catch the ESTABLISHED state.
5671 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false;
5675 if (((V_tcp_pmtud_blackhole_detect == 1) ||
5676 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) ||
5677 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) &&
5678 ((tp->t_state == TCPS_ESTABLISHED) ||
5679 (tp->t_state == TCPS_FIN_WAIT_1))) {
5681		 * The idea here is that each stage of the MTU probe (usually
5682 * 1448 -> 1188 -> 524) should be given 2 chances to recover
5683 * before further clamping down. 'tp->t_rxtshift % 2 == 0'
5684 * should take care of that.
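		 *
		 * With the check below (t_rxtshift in [2, 6) and even),
		 * the clamping steps are taken at t_rxtshift == 2 and
		 * t_rxtshift == 4 only.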
5686 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) ==
5687 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) &&
5688 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
5689 tp->t_rxtshift % 2 == 0)) {
5691			 * Enter Path MTU Black-hole Detection mechanism:
5692			 * - Disable Path MTU Discovery (IP "DF" bit).
5693			 * - Reduce MTU to a lower value than what we negotiated
5696 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
5697 /* Record that we may have found a black hole. */
5698 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
5699 /* Keep track of previous MSS. */
5700 tp->t_pmtud_saved_maxseg = tp->t_maxseg;
5704 * Reduce the MSS to blackhole value or to the
5705 * default in an attempt to retransmit.
5709 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
5710 /* Use the sysctl tuneable blackhole MSS. */
5711 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
5712 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
5713 } else if (isipv6) {
5714 /* Use the default MSS. */
5715 tp->t_maxseg = V_tcp_v6mssdflt;
5717 * Disable Path MTU Discovery when we switch
5720 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
5721 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
5724 #if defined(INET6) && defined(INET)
5728 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
5729 /* Use the sysctl tuneable blackhole MSS. */
5730 tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
5731 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
5733 /* Use the default MSS. */
5734 tp->t_maxseg = V_tcp_mssdflt;
5736 * Disable Path MTU Discovery when we switch
5739 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
5740 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
5745 * If further retransmissions are still unsuccessful
5746 * with a lowered MTU, maybe this isn't a blackhole
5747 * and we restore the previous MSS and blackhole
5748 * detection flags. The limit '6' is determined by
5749 * giving each probe stage (1448, 1188, 524) 2
5750 * chances to recover.
5752 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
5753 (tp->t_rxtshift >= 6)) {
5754 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
5755 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
5756 tp->t_maxseg = tp->t_pmtud_saved_maxseg;
5757 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed);
5762 * If we backed off this far, our srtt estimate is probably bogus.
5763 * Clobber it so we'll take the next rtt measurement as our srtt;
5764 * move the current srtt into rttvar to keep the current retransmit
5767 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
5769 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
5770 in6_losing(tp->t_inpcb);
5773 in_losing(tp->t_inpcb);
5774 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
5777 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
5778 tp->snd_recover = tp->snd_max;
5779 tp->t_flags |= TF_ACKNOW;
5781 rack_cong_signal(tp, NULL, CC_RTO);
5787 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling)
5790 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK);
5795 if (tp->t_state == TCPS_LISTEN) {
5796 /* no timers on listen sockets */
5797 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)
5801 if ((timers & PACE_TMR_RACK) &&
5802 rack->rc_on_min_to) {
5804		 * For the rack timer when we
5805		 * are on a min-timeout (which means rrr_conf = 3)
5806		 * we don't want to check the timer. It may
5807		 * be going off for a pace and that's ok; we
5808		 * want to send the retransmit (if it's ready).
5810		 * If it's on a normal rack timer (non-min) then
5811		 * we will check if it has expired.
5813 goto skip_time_check;
5815 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
5818 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
5820 rack_log_to_processing(rack, cts, ret, 0);
5823 if (hpts_calling == 0) {
5825		 * A user send or a queued mbuf (sack) has called us? We
5826		 * return 0 and let the pacing guards
5827		 * decide whether or not they
5828		 * should cause a send.
5831 rack_log_to_processing(rack, cts, ret, 0);
5835	 * Ok, our timer went off early and we are not paced; false
5836 * alarm, go back to sleep.
5839 left = rack->r_ctl.rc_timer_exp - cts;
5840 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left));
5841 rack_log_to_processing(rack, cts, ret, left);
5845 rack->rc_tmr_stopped = 0;
5846 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK;
5847 if (timers & PACE_TMR_DELACK) {
5848 ret = rack_timeout_delack(tp, rack, cts);
5849 } else if (timers & PACE_TMR_RACK) {
5850 rack->r_ctl.rc_tlp_rxt_last_time = cts;
5851 ret = rack_timeout_rack(tp, rack, cts);
5852 } else if (timers & PACE_TMR_TLP) {
5853 rack->r_ctl.rc_tlp_rxt_last_time = cts;
5854 ret = rack_timeout_tlp(tp, rack, cts);
5855 } else if (timers & PACE_TMR_RXT) {
5856 rack->r_ctl.rc_tlp_rxt_last_time = cts;
5857 ret = rack_timeout_rxt(tp, rack, cts);
5858 } else if (timers & PACE_TMR_PERSIT) {
5859 ret = rack_timeout_persist(tp, rack, cts);
5860 } else if (timers & PACE_TMR_KEEP) {
5861 ret = rack_timeout_keepalive(tp, rack, cts);
5863 rack_log_to_processing(rack, cts, ret, timers);
5868 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line)
5871 uint32_t us_cts, flags_on_entry;
5872 uint8_t hpts_removed = 0;
5874 flags_on_entry = rack->r_ctl.rc_hpts_flags;
5875 us_cts = tcp_get_usecs(&tv);
5876 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
5877 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) ||
5878 ((tp->snd_max - tp->snd_una) == 0))) {
5879 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
5881 /* If we were not delayed cancel out the flag. */
5882 if ((tp->snd_max - tp->snd_una) == 0)
5883 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
5884 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
5886 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
5887 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
5888 if (rack->rc_inp->inp_in_hpts &&
5889 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) {
5891			 * Canceling timers when we have no output being
5892 * paced. We also must remove ourselves from the
5895 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
5898 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK);
5900 if (hpts_removed == 0)
5901 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
5905 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type)
5911 rack_stopall(struct tcpcb *tp)
5913 struct tcp_rack *rack;
5914 rack = (struct tcp_rack *)tp->t_fb_ptr;
5915 rack->t_timers_stopped = 1;
5920 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta)
5926 rack_timer_active(struct tcpcb *tp, uint32_t timer_type)
5932 rack_stop_all_timers(struct tcpcb *tp)
5934 struct tcp_rack *rack;
5937 * Assure no timers are running.
5939 if (tcp_timer_active(tp, TT_PERSIST)) {
5940 /* We enter in persists, set the flag appropriately */
5941 rack = (struct tcp_rack *)tp->t_fb_ptr;
5942 rack->rc_in_persist = 1;
5944 tcp_timer_suspend(tp, TT_PERSIST);
5945 tcp_timer_suspend(tp, TT_REXMT);
5946 tcp_timer_suspend(tp, TT_KEEP);
5947 tcp_timer_suspend(tp, TT_DELACK);
5951 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
5952 struct rack_sendmap *rsm, uint32_t ts)
5957 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
5959 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
5960 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
5961 rsm->r_flags |= RACK_OVERMAX;
5963 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) {
5964 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
5965 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
5967 idx = rsm->r_rtr_cnt - 1;
5968 rsm->r_tim_lastsent[idx] = ts;
5969 if (rsm->r_flags & RACK_ACKED) {
5970		/* Probably MTU discovery messing with us */
5971 rsm->r_flags &= ~RACK_ACKED;
5972 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
5974 if (rsm->r_in_tmap) {
5975 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
5978 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
5980 if (rsm->r_flags & RACK_SACK_PASSED) {
5981 /* We have retransmitted due to the SACK pass */
5982 rsm->r_flags &= ~RACK_SACK_PASSED;
5983 rsm->r_flags |= RACK_WAS_SACKPASS;
5988 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
5989 struct rack_sendmap *rsm, uint32_t ts, int32_t *lenp)
5992	 * We (re-)transmitted starting at rsm->r_start for some length
5993	 * (possibly less than r_end).
5995 struct rack_sendmap *nrsm, *insret;
6000 c_end = rsm->r_start + len;
6001 if (SEQ_GEQ(c_end, rsm->r_end)) {
6003		 * We retransmitted the whole piece, or more than the whole,
6004 * slopping into the next rsm.
6006 rack_update_rsm(tp, rack, rsm, ts);
6007 if (c_end == rsm->r_end) {
6013			/* Hangs over the end; return what's left */
6014 act_len = rsm->r_end - rsm->r_start;
6015 *lenp = (len - act_len);
6016 return (rsm->r_end);
6018 /* We don't get out of this block. */
6021 * Here we retransmitted less than the whole thing which means we
6022 * have to split this into what was transmitted and what was not.
6024 nrsm = rack_alloc_full_limit(rack);
6027 * We can't get memory, so lets not proceed.
6033 * So here we are going to take the original rsm and make it what we
6034 * retransmitted. nrsm will be the tail portion we did not
6035 * retransmit. For example say the chunk was 1, 11 (10 bytes). And
6036 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to
6037 * 1, 6 and the new piece will be 6, 11.
6039 rack_clone_rsm(rack, nrsm, rsm, c_end);
6041 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
6042 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
6044 if (insret != NULL) {
6045 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
6046 nrsm, insret, rack, rsm);
6049 if (rsm->r_in_tmap) {
6050 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
6051 nrsm->r_in_tmap = 1;
6053 rsm->r_flags &= (~RACK_HAS_FIN);
6054 rack_update_rsm(tp, rack, rsm, ts);
6060 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
6061 uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts,
6062 uint8_t pass, struct rack_sendmap *hintrsm, uint32_t us_cts)
6064 struct tcp_rack *rack;
6065 struct rack_sendmap *rsm, *nrsm, *insret, fe;
6066 register uint32_t snd_max, snd_una;
6069 * Add to the RACK log of packets in flight or retransmitted. If
6070 * there is a TS option we will use the TS echoed, if not we will
6073 * Retransmissions will increment the count and move the ts to its
6074 * proper place. Note that if options do not include TS's then we
6075 * won't be able to effectively use the ACK for an RTT on a retran.
6077	 * Notes about r_start and r_end. Let's consider a send starting at
6078 * sequence 1 for 10 bytes. In such an example the r_start would be
6079 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
6080 * This means that r_end is actually the first sequence for the next
6085 * If err is set what do we do XXXrrs? should we not add the thing?
6086 * -- i.e. return if err != 0 or should we pretend we sent it? --
6087 * i.e. proceed with add ** do this for now.
6089 INP_WLOCK_ASSERT(tp->t_inpcb);
6092 * We don't log errors -- we could but snd_max does not
6093 * advance in this case either.
6097 if (th_flags & TH_RST) {
6099 * We don't log resets and we return immediately from
6104 rack = (struct tcp_rack *)tp->t_fb_ptr;
6105 snd_una = tp->snd_una;
6106 if (SEQ_LEQ((seq_out + len), snd_una)) {
6107		/* Are we sending an old segment to induce an ack (keep-alive)? */
6110 if (SEQ_LT(seq_out, snd_una)) {
6111 /* huh? should we panic? */
6114 end = seq_out + len;
6116 if (SEQ_GEQ(end, seq_out))
6117 len = end - seq_out;
6121 snd_max = tp->snd_max;
6122 if (th_flags & (TH_SYN | TH_FIN)) {
6124 * The call to rack_log_output is made before bumping
6125 * snd_max. This means we can record one extra byte on a SYN
6126 * or FIN if seq_out is adding more on and a FIN is present
6127 * (and we are not resending).
6129 if ((th_flags & TH_SYN) && (seq_out == tp->iss))
6131 if (th_flags & TH_FIN)
6133 if (SEQ_LT(snd_max, tp->snd_nxt)) {
6135			 * The add/update has not been done for the FIN/SYN
6138 snd_max = tp->snd_nxt;
6142 /* We don't log zero window probes */
6145 rack->r_ctl.rc_time_last_sent = ts;
6146 if (IN_RECOVERY(tp->t_flags)) {
6147 rack->r_ctl.rc_prr_out += len;
6149 /* First question is it a retransmission or new? */
6150 if (seq_out == snd_max) {
6153 rsm = rack_alloc(rack);
6156 * Hmm out of memory and the tcb got destroyed while
6161 if (th_flags & TH_FIN) {
6162 rsm->r_flags = RACK_HAS_FIN;
6166 rsm->r_tim_lastsent[0] = ts;
6168 rsm->r_rtr_bytes = 0;
6169 rsm->usec_orig_send = us_cts;
6170 if (th_flags & TH_SYN) {
6171 /* The data space is one beyond snd_una */
6172 rsm->r_flags |= RACK_HAS_SIN;
6173 rsm->r_start = seq_out + 1;
6174 rsm->r_end = rsm->r_start + (len - 1);
6177 rsm->r_start = seq_out;
6178 rsm->r_end = rsm->r_start + len;
6181 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
6182 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
6184 if (insret != NULL) {
6185 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
6186 nrsm, insret, rack, rsm);
6189 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
6192 * Special case detection, is there just a single
6193 * packet outstanding when we are not in recovery?
6195 * If this is true mark it so.
6197 if ((IN_RECOVERY(tp->t_flags) == 0) &&
6198 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) {
6199 struct rack_sendmap *prsm;
6201 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
6203 prsm->r_one_out_nr = 1;
6208	 * If we reach here it's a retransmission and we need to find it.
6210 memset(&fe, 0, sizeof(fe));
6212 if (hintrsm && (hintrsm->r_start == seq_out)) {
6216 /* No hints sorry */
6219 if ((rsm) && (rsm->r_start == seq_out)) {
6220 seq_out = rack_update_entry(tp, rack, rsm, ts, &len);
6227 /* Ok it was not the last pointer go through it the hard way. */
6229 fe.r_start = seq_out;
6230 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
6232 if (rsm->r_start == seq_out) {
6233 seq_out = rack_update_entry(tp, rack, rsm, ts, &len);
6240 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) {
6241 /* Transmitted within this piece */
6243 * Ok we must split off the front and then let the
6244 * update do the rest
6246 nrsm = rack_alloc_full_limit(rack);
6248 rack_update_rsm(tp, rack, rsm, ts);
6252 * copy rsm to nrsm and then trim the front of rsm
6253 * to not include this part.
6255 rack_clone_rsm(rack, nrsm, rsm, seq_out);
6256 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
6258 if (insret != NULL) {
6259 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
6260 nrsm, insret, rack, rsm);
6263 if (rsm->r_in_tmap) {
6264 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
6265 nrsm->r_in_tmap = 1;
6267 rsm->r_flags &= (~RACK_HAS_FIN);
6268 seq_out = rack_update_entry(tp, rack, nrsm, ts, &len);
6276	 * Hmm, not found in the map; did they retransmit both old and on into the
6279 if (seq_out == tp->snd_max) {
6281 } else if (SEQ_LT(seq_out, tp->snd_max)) {
6283 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n",
6284 seq_out, len, tp->snd_una, tp->snd_max);
6285 printf("Starting Dump of all rack entries\n");
6286 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
6287 printf("rsm:%p start:%u end:%u\n",
6288 rsm, rsm->r_start, rsm->r_end);
6290 printf("Dump complete\n");
6291 panic("seq_out not found rack:%p tp:%p",
6297 * Hmm beyond sndmax? (only if we are using the new rtt-pack
6300 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p",
6301 seq_out, len, tp->snd_max, tp);
6307 * Record one of the RTT updates from an ack into
6308 * our sample structure.
6312 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt,
6313 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt)
6315 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
6316 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
6317 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
6319 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
6320 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
6321 rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
6323 if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
6324 if (us_rtt < rack->r_ctl.rc_gp_lowrtt)
6325 rack->r_ctl.rc_gp_lowrtt = us_rtt;
6326 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd)
6327 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
6329 if ((confidence == 1) &&
6331 (rsm->r_just_ret) ||
6332 (rsm->r_one_out_nr &&
6333 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) {
6335		 * If the rsm had a just-return
6336		 * hit, then we can't trust the
6337		 * rtt measurement for buffer determination.
6338		 * Note that a confidence of 2 indicates
6339		 * SACK'd, which overrides the r_just_ret or
6340		 * the r_one_out_nr. If it was a CUM-ACK and
6341		 * we had only two outstanding, but get an
6342		 * ack for only 1, then that also lowers our
6347 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
6348 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) {
6349 if (rack->r_ctl.rack_rs.confidence == 0) {
6351 * We take anything with no current confidence
6354 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
6355 rack->r_ctl.rack_rs.confidence = confidence;
6356 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
6357 } else if (confidence || rack->r_ctl.rack_rs.confidence) {
6359 * Once we have a confident number,
6360 * we can update it with a smaller
6361 * value since this confident number
6362 * may include the DSACK time until
6363 * the next segment (the second one) arrived.
6365 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
6366 rack->r_ctl.rack_rs.confidence = confidence;
6367 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
6370 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
6371 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
6372 rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
6373 rack->r_ctl.rack_rs.rs_rtt_cnt++;
6377 * Collect new round-trip time estimate
6378 * and update averages and current timeout.
6381 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp)
6384 uint32_t o_srtt, o_var;
6385 int32_t hrtt_up = 0;
6388 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY)
6389 /* No valid sample */
6391 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) {
6392 /* We are to use the lowest RTT seen in a single ack */
6393 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
6394 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) {
6395 /* We are to use the highest RTT seen in a single ack */
6396 rtt = rack->r_ctl.rack_rs.rs_rtt_highest;
6397 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) {
6398 /* We are to use the average RTT seen in a single ack */
6399 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot /
6400 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt);
6403 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method);
6409 if (rack->rc_gp_rtt_set == 0) {
6411 * With no RTT we have to accept
6412 * even one we are not confident of.
6414 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt;
6415 rack->rc_gp_rtt_set = 1;
6416 } else if (rack->r_ctl.rack_rs.confidence) {
6417 /* update the running gp srtt */
6418 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8);
6419 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8;
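		/*
		 * This is a 1/8-gain EWMA; e.g. with hypothetical numbers,
		 * an rc_gp_srtt of 40000us and a new confident rs_us_rtt of
		 * 24000us move the running value to 40000 - 5000 + 3000 =
		 * 38000us.
		 */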
6421 if (rack->r_ctl.rack_rs.confidence) {
6423 * record the low and high for highly buffered path computation,
6424 * we only do this if we are confident (not a retransmission).
6426 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) {
6427 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
6430 if (rack->rc_highly_buffered == 0) {
6432			 * Currently once we declare a path as
6433 * highly buffered there is no going
6434 * back, which may be a problem...
6436 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) {
6437 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt,
6438 rack->r_ctl.rc_highest_us_rtt,
6439 rack->r_ctl.rc_lowest_us_rtt,
6441 rack->rc_highly_buffered = 1;
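				/*
				 * Example with hypothetical values: a lowest
				 * us_rtt of 10000us and a highest of 300000us
				 * give a ratio of 30; once that exceeds
				 * rack_hbp_thresh the path is treated as
				 * highly buffered from here on.
				 */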
6445 if ((rack->r_ctl.rack_rs.confidence) ||
6446 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) {
6448 * If we are highly confident of it <or> it was
6449 * never retransmitted we accept it as the last us_rtt.
6451 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
6452		/* The lowest rtt can be set if it was not retransmitted */
6453 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) {
6454 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
6455 if (rack->r_ctl.rc_lowest_us_rtt == 0)
6456 rack->r_ctl.rc_lowest_us_rtt = 1;
6459 rack_log_rtt_sample(rack, rtt);
6460 o_srtt = tp->t_srtt;
6461 o_var = tp->t_rttvar;
6462 rack = (struct tcp_rack *)tp->t_fb_ptr;
6463 if (tp->t_srtt != 0) {
6465 * srtt is stored as fixed point with 5 bits after the
6466 * binary point (i.e., scaled by 8). The following magic is
6467 * equivalent to the smoothing algorithm in rfc793 with an
6468 * alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed point).
6469 * Adjust rtt to origin 0.
6471 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
6472 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
6474 tp->t_srtt += delta;
6475 if (tp->t_srtt <= 0)
6479 * We accumulate a smoothed rtt variance (actually, a
6480 * smoothed mean difference), then set the retransmit timer
6481 * to smoothed rtt + 4 times the smoothed variance. rttvar
6482 * is stored as fixed point with 4 bits after the binary
6483 * point (scaled by 16). The following is equivalent to
6484 * rfc793 smoothing with an alpha of .75 (rttvar =
6485 * rttvar*3/4 + |delta| / 4). This replaces rfc793's
6490 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
6491 tp->t_rttvar += delta;
6492 if (tp->t_rttvar <= 0)
6494 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
6495 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
6498 * No rtt measurement yet - use the unsmoothed rtt. Set the
6499 * variance to half the rtt (so our first retransmit happens
6502 tp->t_srtt = rtt << TCP_RTT_SHIFT;
6503 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
6504 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
6506 KMOD_TCPSTAT_INC(tcps_rttupdated);
6509 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt));
6514 * the retransmit should happen at rtt + 4 * rttvar. Because of the
6515 * way we do the smoothing, srtt and rttvar will each average +1/2
6516 * tick of bias. When we compute the retransmit timer, we want 1/2
6517 * tick of rounding and 1 extra tick because of +-1/2 tick
6518 * uncertainty in the firing of the timer. The bias will give us
6519 * exactly the 1.5 tick we need. But, because the bias is
6520 * statistical, we have to test that we don't drop below the minimum
6521 * feasible timer (which is 2 ticks).
6523 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
6524 max(MSEC_2_TICKS(rack_rto_min), rtt + 2), MSEC_2_TICKS(rack_rto_max));
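	/*
	 * Rough sketch with hypothetical numbers: with a smoothed rtt
	 * around 100ms and a smoothed variance around 10ms, the comment
	 * above implies a retransmit timeout of roughly 100 + 4 * 10 =
	 * 140ms; TCPT_RANGESET then bounds that below by
	 * max(MSEC_2_TICKS(rack_rto_min), rtt + 2) and above by
	 * MSEC_2_TICKS(rack_rto_max) before it lands in t_rxtcur.
	 */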
6525 tp->t_softerror = 0;
6529 rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm,
6530 uint32_t t, uint32_t cts)
6533 * For this RSM, we acknowledged the data from a previous
6534 * transmission, not the last one we made. This means we did a false
6537 struct tcp_rack *rack;
6539 if (rsm->r_flags & RACK_HAS_FIN) {
6541		 * The FIN is often sent multiple times when we
6542		 * have everything outstanding ack'd. We ignore this case
6543		 * since it's over now.
6547 if (rsm->r_flags & RACK_TLP) {
6549 * We expect TLP's to have this occur.
6553 rack = (struct tcp_rack *)tp->t_fb_ptr;
6554 /* should we undo cc changes and exit recovery? */
6555 if (IN_RECOVERY(tp->t_flags)) {
6556 if (rack->r_ctl.rc_rsm_start == rsm->r_start) {
6558			 * Undo what we ratcheted down and exit recovery if
6561 EXIT_RECOVERY(tp->t_flags);
6562 tp->snd_recover = tp->snd_una;
6563 if (rack->r_ctl.rc_cwnd_at > tp->snd_cwnd)
6564 tp->snd_cwnd = rack->r_ctl.rc_cwnd_at;
6565 if (rack->r_ctl.rc_ssthresh_at > tp->snd_ssthresh)
6566 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at;
6569 if (rsm->r_flags & RACK_WAS_SACKPASS) {
6571 * We retransmitted based on a sack and the earlier
6572		 * retransmission ack'd it - re-ordering is occurring.
6574 counter_u64_add(rack_reorder_seen, 1);
6575 rack->r_ctl.rc_reorder_ts = cts;
6577 counter_u64_add(rack_badfr, 1);
6578 counter_u64_add(rack_badfr_bytes, (rsm->r_end - rsm->r_start));
6582 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts)
6585	 * Apply the inbound us-rtt at us_cts to the filter.
6589 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
6590 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt,
6592 if (rack->r_ctl.last_pacing_time &&
6593 rack->rc_gp_dyn_mul &&
6594 (rack->r_ctl.last_pacing_time > us_rtt))
6595 rack->pacing_longer_than_rtt = 1;
6597 rack->pacing_longer_than_rtt = 0;
6598 if (old_rtt > us_rtt) {
6599 /* We just hit a new lower rtt time */
6600 rack_log_rtt_shrinks(rack, us_cts, old_rtt,
6601 __LINE__, RACK_RTTS_NEWRTT);
6603		 * Only count it if it's lower than what we saw within our
6606 if ((old_rtt - us_rtt) > rack_min_rtt_movement) {
6607 if (rack_probertt_lower_within &&
6608 rack->rc_gp_dyn_mul &&
6609 (rack->use_fixed_rate == 0) &&
6610 (rack->rc_always_pace)) {
6612 * We are seeing a new lower rtt very close
6613 * to the time that we would have entered probe-rtt.
6614 * This is probably due to the fact that a peer flow
6615				 * has entered probe-rtt. Let's go in now too.
6619 val = rack_probertt_lower_within * rack_time_between_probertt;
6621 if ((rack->in_probe_rtt == 0) &&
6622 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) {
6623 rack_enter_probertt(rack, us_cts);
6626 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
6632 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
6633 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack)
6636 uint32_t t, len_acked;
6638 if ((rsm->r_flags & RACK_ACKED) ||
6639 (rsm->r_flags & RACK_WAS_ACKED))
6643 if (ack_type == CUM_ACKED) {
6644 if (SEQ_GT(th_ack, rsm->r_end))
6645 len_acked = rsm->r_end - rsm->r_start;
6647 len_acked = th_ack - rsm->r_start;
6649 len_acked = rsm->r_end - rsm->r_start;
6650 if (rsm->r_rtr_cnt == 1) {
6653 t = cts - rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
6656 if (!tp->t_rttlow || tp->t_rttlow > t)
6658 if (!rack->r_ctl.rc_rack_min_rtt ||
6659 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
6660 rack->r_ctl.rc_rack_min_rtt = t;
6661 if (rack->r_ctl.rc_rack_min_rtt == 0) {
6662 rack->r_ctl.rc_rack_min_rtt = 1;
6665 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - rsm->usec_orig_send;
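		/*
		 * Note: us_rtt is the microsecond-granularity sample taken
		 * from the original transmit time of this rsm
		 * (usec_orig_send) to the arrival time of the ack, kept
		 * separately from the tick-based 't' computed above.
		 */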
6668 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
6669 if (ack_type == SACKED)
6670 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt);
6673 * For cum-ack we are only confident if what
6674 * is being acked is included in a measurement.
6675 * Otherwise it could be an idle period that
6676 * includes Delayed-ack time.
6678 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt,
6679 (rack->app_limited_needs_set ? 0 : 1), rsm, rsm->r_rtr_cnt);
6681 if ((rsm->r_flags & RACK_TLP) &&
6682 (!IN_RECOVERY(tp->t_flags))) {
6683 /* Segment was a TLP and our retrans matched */
6684 if (rack->r_ctl.rc_tlp_cwnd_reduce) {
6685 rack->r_ctl.rc_rsm_start = tp->snd_max;
6686 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
6687 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
6688 rack_cong_signal(tp, NULL, CC_NDUPACK);
6690 * When we enter recovery we need to assure
6691 * we send one packet.
6693 if (rack->rack_no_prr == 0) {
6694 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
6695 rack_log_to_prr(rack, 7, 0);
6699 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
6700 /* New more recent rack_tmit_time */
6701 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
6702 rack->rc_rack_rtt = t;
6707 * We clear the soft/rxtshift since we got an ack.
6708 * There is no assurance we will call the commit() function
6709 * so we need to clear these to avoid incorrect handling.
6712 tp->t_softerror = 0;
6713 if ((to->to_flags & TOF_TS) &&
6714 (ack_type == CUM_ACKED) &&
6716 ((rsm->r_flags & RACK_OVERMAX) == 0)) {
6718 * Now which timestamp does it match? In this block the ACK
6719 * must be coming from a previous transmission.
6721 for (i = 0; i < rsm->r_rtr_cnt; i++) {
6722 if (rsm->r_tim_lastsent[i] == to->to_tsecr) {
6723 t = cts - rsm->r_tim_lastsent[i];
6726 if ((i + 1) < rsm->r_rtr_cnt) {
6728 rack_earlier_retran(tp, rsm, t, cts);
6730 if (!tp->t_rttlow || tp->t_rttlow > t)
6732 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
6733 rack->r_ctl.rc_rack_min_rtt = t;
6734 if (rack->r_ctl.rc_rack_min_rtt == 0) {
6735 rack->r_ctl.rc_rack_min_rtt = 1;
6738 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
6739 rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
6740 /* New more recent rack_tmit_time */
6741 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
6742 rack->rc_rack_rtt = t;
6744 tcp_rack_xmit_timer(rack, t + 1, len_acked, (t * HPTS_USEC_IN_MSEC), 0, rsm,
6752		 * Ok it's a SACK block that we retransmitted, or a Windows
6753		 * machine without timestamps. We can tell nothing from the
6754		 * time-stamp since it's not there, or the time the peer last
6755		 * received a segment that moved forward its cum-ack point.
6758 i = rsm->r_rtr_cnt - 1;
6759 t = cts - rsm->r_tim_lastsent[i];
6762 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
6764 * We retransmitted and the ack came back in less
6765 * than the smallest rtt we have observed. We most
6766		 * likely did an improper retransmit as outlined in
6767 * 4.2 Step 3 point 2 in the rack-draft.
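		 *
		 * For example, with hypothetical numbers: if the smallest
		 * rtt we have ever observed is 40ms and this ack arrives
		 * 5ms after our retransmit, the ack almost certainly
		 * matches the earlier transmission, so we take the sample
		 * from the previous send time instead.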
6769 i = rsm->r_rtr_cnt - 2;
6770 t = cts - rsm->r_tim_lastsent[i];
6771 rack_earlier_retran(tp, rsm, t, cts);
6772 } else if (rack->r_ctl.rc_rack_min_rtt) {
6774 * We retransmitted it and the retransmit did the
6777 if (!rack->r_ctl.rc_rack_min_rtt ||
6778 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
6779 rack->r_ctl.rc_rack_min_rtt = t;
6780 if (rack->r_ctl.rc_rack_min_rtt == 0) {
6781 rack->r_ctl.rc_rack_min_rtt = 1;
6784 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[i])) {
6785 /* New more recent rack_tmit_time */
6786 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[i];
6787 rack->rc_rack_rtt = t;
6796 * Mark the SACK_PASSED flag on all entries prior to rsm (send-wise).
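 *
 * In other words, every segment sent earlier than rsm that is still
 * un-acked when this SACK arrives gets RACK_SACK_PASSED set, which is
 * what later qualifies it for a RACK (time-based) retransmission.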
6799 rack_log_sack_passed(struct tcpcb *tp,
6800 struct tcp_rack *rack, struct rack_sendmap *rsm)
6802 struct rack_sendmap *nrsm;
6805 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap,
6806 rack_head, r_tnext) {
6808		/* Skip the original segment; it is acked */
6811 if (nrsm->r_flags & RACK_ACKED) {
6813 * Skip ack'd segments, though we
6814 * should not see these, since tmap
6815 * should not have ack'd segments.
6819 if (nrsm->r_flags & RACK_SACK_PASSED) {
6821 * We found one that is already marked
6822 * passed, we have been here before and
6823 * so all others below this are marked.
6827 nrsm->r_flags |= RACK_SACK_PASSED;
6828 nrsm->r_flags &= ~RACK_WAS_SACKPASS;
6833 rack_need_set_test(struct tcpcb *tp,
6834 struct tcp_rack *rack,
6835 struct rack_sendmap *rsm,
6841 if ((tp->t_flags & TF_GPUTINPROG) &&
6842 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
6844 * We were app limited, and this ack
6845 * butts up or goes beyond the point where we want
6846 * to start our next measurement. We need
6847 * to record the new gput_ts as here and
6848 * possibly update the start sequence.
6852 if (rsm->r_rtr_cnt > 1) {
6854 * This is a retransmit, can we
6855 * really make any assessment at this
6856 * point? We are not really sure of
6857 * the timestamp, is it this or the
6858 * previous transmission?
6860		 * Let's wait for something better that
6861 * is not retransmitted.
6867 rack->app_limited_needs_set = 0;
6868 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
6869 /* Do we start at a new end? */
6870 if ((use_which == RACK_USE_BEG) &&
6871 SEQ_GEQ(rsm->r_start, tp->gput_seq)) {
6873 * When we get an ACK that just eats
6874 * up some of the rsm, we set RACK_USE_BEG
6875		 * since what's at r_start (i.e. th_ack)
6876		 * is left unacked and that's where the
6877		 * measurement now starts.
6879 tp->gput_seq = rsm->r_start;
6880 rack->r_ctl.rc_gp_output_ts = rsm->usec_orig_send;
6882 if ((use_which == RACK_USE_END) &&
6883 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
6885 * We use the end when the cumack
6886 * is moving forward and completely
6887 * deleting the rsm passed so basically
6888 * r_end holds th_ack.
6890 * For SACK's we also want to use the end
6891 * since this piece just got sacked and
6892 * we want to target anything after that
6893 * in our measurement.
6895 tp->gput_seq = rsm->r_end;
6896 rack->r_ctl.rc_gp_output_ts = rsm->usec_orig_send;
6898 if (use_which == RACK_USE_END_OR_THACK) {
6900 * special case for ack moving forward,
6901 * not a sack, we need to move all the
6902 * way up to where this ack cum-ack moves
6905 if (SEQ_GT(th_ack, rsm->r_end))
6906 tp->gput_seq = th_ack;
6908 tp->gput_seq = rsm->r_end;
6909 rack->r_ctl.rc_gp_output_ts = rsm->usec_orig_send;
6911 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) {
6913 * We moved beyond this guy's range, re-calculate
6914 * the new end point.
6916 if (rack->rc_gp_filled == 0) {
6917 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
6919 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
6923 * We are moving the goal post, we may be able to clear the
6924 * measure_saw_probe_rtt flag.
6926 if ((rack->in_probe_rtt == 0) &&
6927 (rack->measure_saw_probe_rtt) &&
6928 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
6929 rack->measure_saw_probe_rtt = 0;
6930 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts,
6931 seq, tp->gput_seq, 0, 5, line, NULL);
6932 if (rack->rc_gp_filled &&
6933 ((tp->gput_ack - tp->gput_seq) <
6934 max(rc_init_window(rack), (MIN_GP_WIN *
6935 ctf_fixed_maxseg(tp))))) {
6937		 * There is no sense in continuing this measurement
6938		 * because it's too small to gain us anything we
6939 * trust. Skip it and that way we can start a new
6940 * measurement quicker.
6942 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq,
6943 0, 0, 0, 6, __LINE__, NULL);
6944 tp->t_flags &= ~TF_GPUTINPROG;
6950 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack,
6951 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two)
6953 uint32_t start, end, changed = 0;
6954 struct rack_sendmap stack_map;
6955 struct rack_sendmap *rsm, *nrsm, fe, *insret, *prev, *next;
6956 int32_t used_ref = 1;
6959 start = sack->start;
6962 memset(&fe, 0, sizeof(fe));
6964 if ((rsm == NULL) ||
6965 (SEQ_LT(end, rsm->r_start)) ||
6966 (SEQ_GEQ(start, rsm->r_end)) ||
6967 (SEQ_LT(start, rsm->r_start))) {
6969 * We are not in the right spot,
6970 * find the correct spot in the tree.
6974 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
6981 /* Ok we have an ACK for some piece of this rsm */
6982 if (rsm->r_start != start) {
6983 if ((rsm->r_flags & RACK_ACKED) == 0) {
6985 * Need to split this in two pieces the before and after,
6986 * the before remains in the map, the after must be
6987 * added. In other words we have:
6988 * rsm |--------------|
6992 * and nrsm will be the sacked piece
6995			 * But before we start down that path let's
6996 * see if the sack spans over on top of
6997 * the next guy and it is already sacked.
6999 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7000 if (next && (next->r_flags & RACK_ACKED) &&
7001 SEQ_GEQ(end, next->r_start)) {
7003 * So the next one is already acked, and
7004 * we can thus by hookery use our stack_map
7005 * to reflect the piece being sacked and
7006 * then adjust the two tree entries moving
7007 * the start and ends around. So we start like:
7008 * rsm |------------| (not-acked)
7009 * next |-----------| (acked)
7010 * sackblk |-------->
7011 * We want to end like so:
7012 * rsm |------| (not-acked)
7013 * next |-----------------| (acked)
7015 * Where nrsm is a temporary stack piece we
7016 * use to update all the gizmos.
7018 /* Copy up our fudge block */
7020 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
7021 /* Now adjust our tree blocks */
7023 next->r_start = start;
7024 /* Clear out the dup ack count of the remainder */
7026 rsm->r_just_ret = 0;
7027 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
7028 /* Now lets make sure our fudge block is right */
7029 nrsm->r_start = start;
7030 /* Now lets update all the stats and such */
7031 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
7032 if (rack->app_limited_needs_set)
7033 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
7034 changed += (nrsm->r_end - nrsm->r_start);
7035 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
7036 if (nrsm->r_flags & RACK_SACK_PASSED) {
7037 counter_u64_add(rack_reorder_seen, 1);
7038 rack->r_ctl.rc_reorder_ts = cts;
7041 * Now we want to go up from rsm (the
7042 * one left un-acked) to the next one
7043 * in the tmap. We do this so when
7044 * we walk backwards we include marking
7045 * sack-passed on rsm (The one passed in
7046 * is skipped since it is generally called
7047 * on something sacked before removing it
7050 if (rsm->r_in_tmap) {
7051 nrsm = TAILQ_NEXT(rsm, r_tnext);
7053 * Now that we have the next
7054 * one walk backwards from there.
7056 if (nrsm && nrsm->r_in_tmap)
7057 rack_log_sack_passed(tp, rack, nrsm);
7059 /* Now are we done? */
7060 if (SEQ_LT(end, next->r_end) ||
7061 (end == next->r_end)) {
7062 /* Done with block */
7065 counter_u64_add(rack_sack_used_next_merge, 1);
7066				/* Position for the next block */
7067 start = next->r_end;
7068 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next);
7073 * We can't use any hookery here, so we
7074 * need to split the map. We enter like
7078 * We will add the new block nrsm and
7079 * that will be the new portion, and then
7080			 * fall through after resetting rsm. So we
7081 * split and look like this:
7085			 * We then fall through resetting
7086 * rsm to nrsm, so the next block
7089 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
7092				 * failed XXXrrs what can we do but lose the sack
7097 counter_u64_add(rack_sack_splits, 1);
7098 rack_clone_rsm(rack, nrsm, rsm, start);
7099 rsm->r_just_ret = 0;
7100 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7102 if (insret != NULL) {
7103 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7104 nrsm, insret, rack, rsm);
7107 if (rsm->r_in_tmap) {
7108 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
7109 nrsm->r_in_tmap = 1;
7111 rsm->r_flags &= (~RACK_HAS_FIN);
7112 /* Position us to point to the new nrsm that starts the sack blk */
7116 /* Already sacked this piece */
7117 counter_u64_add(rack_sack_skipped_acked, 1);
7119 if (end == rsm->r_end) {
7120 /* Done with block */
7121 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7123 } else if (SEQ_LT(end, rsm->r_end)) {
7124			/* A partial sack to an already sacked block */
7126 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7130 * The end goes beyond this guy
7131			 * reposition the start to the
7135 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7141 if (SEQ_GEQ(end, rsm->r_end)) {
7143 * The end of this block is either beyond this guy or right
7144 * at this guy. I.e.:
7150 if ((rsm->r_flags & RACK_ACKED) == 0) {
7151 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
7152 changed += (rsm->r_end - rsm->r_start);
7153 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
7154 if (rsm->r_in_tmap) /* should be true */
7155 rack_log_sack_passed(tp, rack, rsm);
7156			/* Is Reordering occurring? */
7157 if (rsm->r_flags & RACK_SACK_PASSED) {
7158 rsm->r_flags &= ~RACK_SACK_PASSED;
7159 counter_u64_add(rack_reorder_seen, 1);
7160 rack->r_ctl.rc_reorder_ts = cts;
7162 if (rack->app_limited_needs_set)
7163 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
7164 rsm->r_ack_arrival = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
7165 rsm->r_flags |= RACK_ACKED;
7166 rsm->r_flags &= ~RACK_TLP;
7167 if (rsm->r_in_tmap) {
7168 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7172 counter_u64_add(rack_sack_skipped_acked, 1);
7175 if (end == rsm->r_end) {
7176 /* This block only - done, setup for next */
7180			 * There is more not covered by this rsm; move on
7181 * to the next block in the RB tree.
7183 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7191 * The end of this sack block is smaller than
7196 if ((rsm->r_flags & RACK_ACKED) == 0) {
7197 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7198 if (prev && (prev->r_flags & RACK_ACKED)) {
7200 * Goal, we want the right remainder of rsm to shrink
7201 * in place and span from (rsm->r_start = end) to rsm->r_end.
7202 * We want to expand prev to go all the way
7203 * to prev->r_end <- end.
7204 * so in the tree we have before:
7205 * prev |--------| (acked)
7206 * rsm |-------| (non-acked)
7208 * We churn it so we end up with
7209 * prev |----------| (acked)
7210 * rsm |-----| (non-acked)
7211 * nrsm |-| (temporary)
7214 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
7217 /* Now adjust nrsm (stack copy) to be
7218 * the one that is the small
7219 * piece that was "sacked".
7223 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
7225 * Now nrsm is our new little piece
7226 * that is acked (which was merged
7227 * to prev). Update the rtt and changed
7228 * based on that. Also check for reordering.
7230 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
7231 if (rack->app_limited_needs_set)
7232 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
7233 changed += (nrsm->r_end - nrsm->r_start);
7234 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
7235 if (nrsm->r_flags & RACK_SACK_PASSED) {
7236 counter_u64_add(rack_reorder_seen, 1);
7237 rack->r_ctl.rc_reorder_ts = cts;
7240 counter_u64_add(rack_sack_used_prev_merge, 1);
7243 * This is the case where our previous
7244 * block is not acked either, so we must
7245 * split the block in two.
7247 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
7249				/* failed rrs what can we do but lose the sack info? */
7253 * In this case nrsm becomes
7254 * nrsm->r_start = end;
7255 * nrsm->r_end = rsm->r_end;
7256 * which is un-acked.
7258 * rsm->r_end = nrsm->r_start;
7259 * i.e. the remaining un-acked
7260 * piece is left on the left
7263 * So we start like this
7264 * rsm |----------| (not acked)
7266 * build it so we have
7268 * nrsm |------| (not acked)
7270 counter_u64_add(rack_sack_splits, 1);
7271 rack_clone_rsm(rack, nrsm, rsm, end);
7272 rsm->r_flags &= (~RACK_HAS_FIN);
7273 rsm->r_just_ret = 0;
7274 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7276 if (insret != NULL) {
7277 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7278 nrsm, insret, rack, rsm);
7281 if (rsm->r_in_tmap) {
7282 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
7283 nrsm->r_in_tmap = 1;
7286 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
7287 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
7288 changed += (rsm->r_end - rsm->r_start);
7289 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
7290 if (rsm->r_in_tmap) /* should be true */
7291 rack_log_sack_passed(tp, rack, rsm);
7292		/* Is Reordering occurring? */
7293 if (rsm->r_flags & RACK_SACK_PASSED) {
7294 rsm->r_flags &= ~RACK_SACK_PASSED;
7295 counter_u64_add(rack_reorder_seen, 1);
7296 rack->r_ctl.rc_reorder_ts = cts;
7298 if (rack->app_limited_needs_set)
7299 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
7300 rsm->r_ack_arrival = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
7301 rsm->r_flags |= RACK_ACKED;
7302 rsm->r_flags &= ~RACK_TLP;
7303 if (rsm->r_in_tmap) {
7304 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7308 } else if (start != end){
7310 * The block was already acked.
7312 counter_u64_add(rack_sack_skipped_acked, 1);
7316 if (rsm && (rsm->r_flags & RACK_ACKED)) {
7318 * Now can we merge where we worked
7319 * with either the previous or
7322 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7324 if (next->r_flags & RACK_ACKED) {
7325 /* yep this and next can be merged */
7326 rsm = rack_merge_rsm(rack, rsm, next);
7327 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7331 /* Now what about the previous? */
7332 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7334 if (prev->r_flags & RACK_ACKED) {
7335 /* yep the previous and this can be merged */
7336 rsm = rack_merge_rsm(rack, prev, rsm);
7337 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7342 if (used_ref == 0) {
7343 counter_u64_add(rack_sack_proc_all, 1);
7345 counter_u64_add(rack_sack_proc_short, 1);
7347 /* Save off the next one for quick reference. */
7349 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7352 *prsm = rack->r_ctl.rc_sacklast = nrsm;
7353 /* Pass back the moved. */
7359 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack)
7361 struct rack_sendmap *tmap;
7364 while (rsm && (rsm->r_flags & RACK_ACKED)) {
7365 /* It's no longer sacked, mark it so */
7366 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
7368 if (rsm->r_in_tmap) {
7369 panic("rack:%p rsm:%p flags:0x%x in tmap?",
7370 rack, rsm, rsm->r_flags);
7373 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS);
7374 /* Rebuild it into our tmap */
7376 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7379 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext);
7382 tmap->r_in_tmap = 1;
7383 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7386 * Now let's possibly clear the sack filter so we start
7387 * recognizing sacks that cover this area.
7389 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack);
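/*
 * rack_do_decay() -- bookkeeping for the SACK-attack detection ("SAD")
 * counters. Roughly once per second we decay ack_count, sack_count and
 * the sack-move counters via ctf_decay_count() so that stale history
 * does not dominate the ratios used in rack_log_ack(). Idle connections,
 * connections in persist, and (under NETFLIX_EXP_DETECTION) connections
 * with a low input packet rate escape without any decay.
 */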
7394 rack_do_decay(struct tcp_rack *rack)
7398 #define timersub(tvp, uvp, vvp) \
7400 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
7401 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
7402 if ((vvp)->tv_usec < 0) { \
7404 (vvp)->tv_usec += 1000000; \
7408 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res);
7411 rack->r_ctl.input_pkt++;
7412 if ((rack->rc_in_persist) ||
7413 (res.tv_sec >= 1) ||
7414 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) {
7416 * Check for decay of non-SAD,
7417 * we want all SAD detection metrics to
7418 * decay by 1/4 for each second (or more) that has passed.
7422 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt;
7423 /* Update our saved tracking values */
7424 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt;
7425 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
7426 /* Now do we escape without decay? */
7427 #ifdef NETFLIX_EXP_DETECTION
7428 if (rack->rc_in_persist ||
7429 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) ||
7430 (pkt_delta < tcp_sad_low_pps)){
7432 * We don't decay idle connections
7433 * or ones that have a low input pps.
7437 /* Decay the counters */
7438 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count,
7440 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count,
7442 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra,
7444 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move,
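/*
 * rack_log_ack() -- main ACK/SACK processing. The cumulative ACK is
 * applied to the send map first (freeing or trimming rsm entries and
 * checking for a reneging peer), then any SACK blocks are validated,
 * run through the sack filter, sorted, de-duplicated and handed one at
 * a time to rack_proc_sack_blk(). The ack/sack/move counters feed the
 * SACK-attack detection logic near the bottom, and finally we decide
 * whether the new loss information should enter recovery and set up
 * the PRR accounting.
 */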
7451 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th)
7453 uint32_t changed, entered_recovery = 0;
7454 struct tcp_rack *rack;
7455 struct rack_sendmap *rsm, *rm;
7456 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1];
7457 register uint32_t th_ack;
7458 int32_t i, j, k, num_sack_blks = 0;
7459 uint32_t cts, acked, ack_point, sack_changed = 0;
7460 int loop_start = 0, moved_two = 0;
7463 INP_WLOCK_ASSERT(tp->t_inpcb);
7464 if (th->th_flags & TH_RST) {
7465 /* We don't log resets */
7468 rack = (struct tcp_rack *)tp->t_fb_ptr;
7469 cts = tcp_ts_getticks();
7470 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
7472 th_ack = th->th_ack;
7473 if (rack->sack_attack_disable == 0)
7474 rack_do_decay(rack);
7475 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) {
7477 * You only get credit for
7478 * MSS and greater (and you get extra
7479 * credit for larger cum-ack moves).
7483 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp);
7484 rack->r_ctl.ack_count += ac;
7485 counter_u64_add(rack_ack_total, ac);
7487 if (rack->r_ctl.ack_count > 0xfff00000) {
7489 * reduce the number to keep us under
7492 rack->r_ctl.ack_count /= 2;
7493 rack->r_ctl.sack_count /= 2;
7495 if (SEQ_GT(th_ack, tp->snd_una)) {
7496 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__);
7497 tp->t_acktime = ticks;
7499 if (rsm && SEQ_GT(th_ack, rsm->r_start))
7500 changed = th_ack - rsm->r_start;
7503 * The ACK point is advancing to th_ack, we must drop off
7504 * the packets in the rack log and calculate any eligible
7507 rack->r_wanted_output = 1;
7509 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
7511 if ((th_ack - 1) == tp->iss) {
7513 * For the SYN incoming case we will not
7514 * have called tcp_output for the sending of
7515 * the SYN, so there will be no map. All
7516 * other cases should probably be a panic.
7520 if (tp->t_flags & TF_SENTFIN) {
7521 /* if we send a FIN we will not have a map */
7525 panic("No rack map tp:%p for th:%p state:%d rack:%p snd_una:%u snd_max:%u snd_nxt:%u chg:%d\n",
7527 th, tp->t_state, rack,
7528 tp->snd_una, tp->snd_max, tp->snd_nxt, changed);
7532 if (SEQ_LT(th_ack, rsm->r_start)) {
7533 /* Huh map is missing this */
7535 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n",
7537 th_ack, tp->t_state, rack->r_state);
7541 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack);
7542 /* Now do we consume the whole thing? */
7543 if (SEQ_GEQ(th_ack, rsm->r_end)) {
7544 /* It's all consumed. */
7546 uint8_t newly_acked;
7548 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
7549 rsm->r_rtr_bytes = 0;
7550 /* Record the time of highest cumack sent */
7551 rack->r_ctl.rc_gp_cumack_ts = rsm->usec_orig_send;
7552 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7555 panic("removing head in rack:%p rsm:%p rm:%p",
7559 if (rsm->r_in_tmap) {
7560 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7564 if (rsm->r_flags & RACK_ACKED) {
7566 * It was acked on the scoreboard -- remove
7569 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
7571 } else if (rsm->r_flags & RACK_SACK_PASSED) {
7573 * There are segments ACKED on the
7574 * scoreboard further up. We are seeing
7577 rsm->r_flags &= ~RACK_SACK_PASSED;
7578 counter_u64_add(rack_reorder_seen, 1);
7579 rsm->r_ack_arrival = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
7580 rsm->r_flags |= RACK_ACKED;
7581 rack->r_ctl.rc_reorder_ts = cts;
7583 left = th_ack - rsm->r_end;
7584 if (rack->app_limited_needs_set && newly_acked)
7585 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK);
7586 /* Free back to zone */
7587 rack_free(rack, rsm);
7593 if (rsm->r_flags & RACK_ACKED) {
7595 * It was acked on the scoreboard -- remove it from
7596 * total for the part being cum-acked.
7598 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
7601 * Clear the dup ack count for
7602 * the piece that remains.
7605 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
7606 if (rsm->r_rtr_bytes) {
7608 * It was retransmitted adjust the
7609 * sack holes for what was acked.
7613 ack_am = (th_ack - rsm->r_start);
7614 if (ack_am >= rsm->r_rtr_bytes) {
7615 rack->r_ctl.rc_holes_rxt -= ack_am;
7616 rsm->r_rtr_bytes -= ack_am;
7620 * Update where the piece starts and record
7621 * the time of send of highest cumack sent.
7623 rack->r_ctl.rc_gp_cumack_ts = rsm->usec_orig_send;
7624 rsm->r_start = th_ack;
7625 if (rack->app_limited_needs_set)
7626 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG);
7629 /* Check for reneging */
7630 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
7631 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) {
7633 * The peer has moved snd_una up to
7634 * the edge of this send, i.e. one
7635 * that it had previously acked. The only
7636 * way that can be true is if the peer threw
7637 * away data (space issues) that it had
7638 * previously sacked (else it would have
7639 * given us snd_una up to (rsm->r_end).
7640 * We need to undo the acked markings here.
7642 * Note we have to look to make sure th_ack is
7643 * our rsm->r_start in case we get an old ack
7644 * where th_ack is behind snd_una.
7646 rack_peer_reneges(rack, rsm, th->th_ack);
7648 if ((to->to_flags & TOF_SACK) == 0) {
7649 /* We are done nothing left */
7652 /* Sack block processing */
7653 if (SEQ_GT(th_ack, tp->snd_una))
7656 ack_point = tp->snd_una;
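/*
 * A received block is only usable if it lies entirely above the current
 * ack point and does not extend past snd_max; a block that sits at or
 * below th_ack is treated as a D-SACK report (counted under
 * NETFLIX_STATS) rather than as new scoreboard information.
 */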
7657 for (i = 0; i < to->to_nsacks; i++) {
7658 bcopy((to->to_sacks + i * TCPOLEN_SACK),
7659 &sack, sizeof(sack));
7660 sack.start = ntohl(sack.start);
7661 sack.end = ntohl(sack.end);
7662 if (SEQ_GT(sack.end, sack.start) &&
7663 SEQ_GT(sack.start, ack_point) &&
7664 SEQ_LT(sack.start, tp->snd_max) &&
7665 SEQ_GT(sack.end, ack_point) &&
7666 SEQ_LEQ(sack.end, tp->snd_max)) {
7667 sack_blocks[num_sack_blks] = sack;
7669 #ifdef NETFLIX_STATS
7670 } else if (SEQ_LEQ(sack.start, th_ack) &&
7671 SEQ_LEQ(sack.end, th_ack)) {
7673 * It's a D-SACK block.
7675 tcp_record_dsack(sack.start, sack.end);
7680 * Sort the SACK blocks so we can update the rack scoreboard with
7683 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks,
7684 num_sack_blks, th->th_ack);
7685 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks);
7686 if (num_sack_blks == 0) {
7687 /* Nothing to sack (DSACKs?) */
7688 goto out_with_totals;
7690 if (num_sack_blks < 2) {
7691 /* Only one, we don't need to sort */
7694 /* Sort the sacks */
7695 for (i = 0; i < num_sack_blks; i++) {
7696 for (j = i + 1; j < num_sack_blks; j++) {
7697 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
7698 sack = sack_blocks[i];
7699 sack_blocks[i] = sack_blocks[j];
7700 sack_blocks[j] = sack;
7705 * Now are any of the sack block ends the same (yes some
7706 * implementations send these)?
7709 if (num_sack_blks == 0)
7710 goto out_with_totals;
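/*
 * Some implementations emit two blocks that share the same end. When
 * that happens we keep the block with the smaller start (it covers
 * more) and compact the remaining blocks down over the duplicate. A
 * purely illustrative example: after sorting, [100,300) and [250,300)
 * share end 300, so [250,300) is dropped and num_sack_blks shrinks by
 * one.
 */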
7711 if (num_sack_blks > 1) {
7712 for (i = 0; i < num_sack_blks; i++) {
7713 for (j = i + 1; j < num_sack_blks; j++) {
7714 if (sack_blocks[i].end == sack_blocks[j].end) {
7716 * Ok these two have the same end we
7717 * want the smallest end and then
7718 * throw away the larger and start
7721 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) {
7723 * The second block covers
7724 * more area, use that
7726 sack_blocks[i].start = sack_blocks[j].start;
7729 * Now collapse out the dup-sack and
7732 for (k = (j + 1); k < num_sack_blks; k++) {
7733 sack_blocks[j].start = sack_blocks[k].start;
7734 sack_blocks[j].end = sack_blocks[k].end;
7745 * First lets look to see if
7746 * we have retransmitted and
7747 * can use the transmit next?
7749 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
7751 SEQ_GT(sack_blocks[0].end, rsm->r_start) &&
7752 SEQ_LT(sack_blocks[0].start, rsm->r_end)) {
7754 * We probably did the FR and the next
7755 * SACK in continues as we would expect.
7757 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two);
7759 rack->r_wanted_output = 1;
7761 sack_changed += acked;
7763 if (num_sack_blks == 1) {
7765 * This is what we would expect from
7766 * a normal implementation to happen
7767 * after we have retransmitted the FR,
7768 * i.e. the sack-filter pushes down
7769 * to 1 block and the next to be retransmitted
7770 * is the sequence in the sack block (as more
7771 * data is acked). Count this as ACK'd data to boost
7772 * up the chances of recovering from any false positives.
7774 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp));
7775 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp)));
7776 counter_u64_add(rack_express_sack, 1);
7777 if (rack->r_ctl.ack_count > 0xfff00000) {
7779 * reduce the number to keep us under
7782 rack->r_ctl.ack_count /= 2;
7783 rack->r_ctl.sack_count /= 2;
7785 goto out_with_totals;
7788 * Start the loop through the
7789 * rest of blocks, past the first block.
7795 /* It's a sack of some sort */
7796 rack->r_ctl.sack_count++;
7797 if (rack->r_ctl.sack_count > 0xfff00000) {
7799 * reduce the number to keep us under
7802 rack->r_ctl.ack_count /= 2;
7803 rack->r_ctl.sack_count /= 2;
7805 counter_u64_add(rack_sack_total, 1);
7806 if (rack->sack_attack_disable) {
7807 /* An attacker disablement is in place */
7808 if (num_sack_blks > 1) {
7809 rack->r_ctl.sack_count += (num_sack_blks - 1);
7810 rack->r_ctl.sack_moved_extra++;
7811 counter_u64_add(rack_move_some, 1);
7812 if (rack->r_ctl.sack_moved_extra > 0xfff00000) {
7813 rack->r_ctl.sack_moved_extra /= 2;
7814 rack->r_ctl.sack_noextra_move /= 2;
7819 rsm = rack->r_ctl.rc_sacklast;
7820 for (i = loop_start; i < num_sack_blks; i++) {
7821 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two);
7823 rack->r_wanted_output = 1;
7825 sack_changed += acked;
7829 * If we did not get a SACK for at least a MSS and
7830 * had to move at all, or if we moved more than our
7831 * threshold, it counts against the "extra" move.
7833 rack->r_ctl.sack_moved_extra += moved_two;
7834 counter_u64_add(rack_move_some, 1);
7837 * else we did not have to move
7838 * any more than we would expect.
7840 rack->r_ctl.sack_noextra_move++;
7841 counter_u64_add(rack_move_none, 1);
7843 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) {
7845 * If the SACK was not a full MSS then
7846 * we add to sack_count the number of
7847 * MSS's (or possibly more than
7848 * an MSS if it's a TSO send) we had to skip by.
7850 rack->r_ctl.sack_count += moved_two;
7851 counter_u64_add(rack_sack_total, moved_two);
7854 * Now we need to setup for the next
7855 * round. First we make sure we won't
7856 * exceed the size of our uint32_t on
7857 * the various counts, and then clear out
7860 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) ||
7861 (rack->r_ctl.sack_noextra_move > 0xfff00000)) {
7862 rack->r_ctl.sack_moved_extra /= 2;
7863 rack->r_ctl.sack_noextra_move /= 2;
7865 if (rack->r_ctl.sack_count > 0xfff00000) {
7866 rack->r_ctl.ack_count /= 2;
7867 rack->r_ctl.sack_count /= 2;
7872 if (num_sack_blks > 1) {
7874 * You get an extra stroke if
7875 * you have more than one sack-blk, this
7876 * could be where we are skipping forward
7877 * and the sack-filter is still working, or
7878 * it could be an attacker constantly
7881 rack->r_ctl.sack_moved_extra++;
7882 counter_u64_add(rack_move_some, 1);
7885 #ifdef NETFLIX_EXP_DETECTION
7886 if ((rack->do_detection || tcp_force_detection) &&
7887 tcp_sack_to_ack_thresh &&
7888 tcp_sack_to_move_thresh &&
7889 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) {
7891 * We have thresholds set to find
7892 * possible attackers and disable sack.
7895 uint64_t ackratio, moveratio, movetotal;
7898 rack_log_sad(rack, 1);
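/*
 * Both ratios below are per-mille: ackratio is how many SACKs we have
 * processed per 1000 cumulative-ack "credits", and moveratio is how
 * many "extra" sack moves occurred per 1000 total moves. If both cross
 * their sysctl thresholds we flag a probable attacker and disable SACK
 * processing (clamping cwnd to the flight size); while disabled we
 * re-enable once the ack ratio drops back under tcp_restoral_thresh or
 * the map count shrinks below tcp_map_minimum, restarting the counters.
 */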
7899 ackratio = (uint64_t)(rack->r_ctl.sack_count);
7900 ackratio *= (uint64_t)(1000);
7901 if (rack->r_ctl.ack_count)
7902 ackratio /= (uint64_t)(rack->r_ctl.ack_count);
7904 /* We really should not hit here */
7907 if ((rack->sack_attack_disable == 0) &&
7908 (ackratio > rack_highest_sack_thresh_seen))
7909 rack_highest_sack_thresh_seen = (uint32_t)ackratio;
7910 movetotal = rack->r_ctl.sack_moved_extra;
7911 movetotal += rack->r_ctl.sack_noextra_move;
7912 moveratio = rack->r_ctl.sack_moved_extra;
7913 moveratio *= (uint64_t)1000;
7915 moveratio /= movetotal;
7917 /* No moves, that's pretty good */
7920 if ((rack->sack_attack_disable == 0) &&
7921 (moveratio > rack_highest_move_thresh_seen))
7922 rack_highest_move_thresh_seen = (uint32_t)moveratio;
7923 if (rack->sack_attack_disable == 0) {
7924 if ((ackratio > tcp_sack_to_ack_thresh) &&
7925 (moveratio > tcp_sack_to_move_thresh)) {
7926 /* Disable sack processing */
7927 rack->sack_attack_disable = 1;
7928 if (rack->r_rep_attack == 0) {
7929 rack->r_rep_attack = 1;
7930 counter_u64_add(rack_sack_attacks_detected, 1);
7932 if (tcp_attack_on_turns_on_logging) {
7934 * Turn on logging, used for debugging
7937 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging;
7939 /* Clamp the cwnd at flight size */
7940 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd;
7941 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
7942 rack_log_sad(rack, 2);
7945 /* We are sack-disabled; check for false positives */
7946 if ((ackratio <= tcp_restoral_thresh) ||
7947 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) {
7948 rack->sack_attack_disable = 0;
7949 rack_log_sad(rack, 3);
7950 /* Restart counting */
7951 rack->r_ctl.sack_count = 0;
7952 rack->r_ctl.sack_moved_extra = 0;
7953 rack->r_ctl.sack_noextra_move = 1;
7954 rack->r_ctl.ack_count = max(1,
7955 (BYTES_THIS_ACK(tp, th)/ctf_fixed_maxseg(rack->rc_tp)));
7957 if (rack->r_rep_reverse == 0) {
7958 rack->r_rep_reverse = 1;
7959 counter_u64_add(rack_sack_attacks_reversed, 1);
7961 /* Restore the cwnd */
7962 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd)
7963 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd;
7969 /* Something changed cancel the rack timer */
7970 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
7972 tsused = tcp_ts_getticks();
7973 rsm = tcp_rack_output(tp, rack, tsused);
7974 if ((!IN_RECOVERY(tp->t_flags)) &&
7976 /* Enter recovery */
7977 rack->r_ctl.rc_rsm_start = rsm->r_start;
7978 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
7979 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
7980 entered_recovery = 1;
7981 rack_cong_signal(tp, NULL, CC_NDUPACK);
7983 * When we enter recovery we need to assure we send
7986 if (rack->rack_no_prr == 0) {
7987 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
7988 rack_log_to_prr(rack, 8, 0);
7990 rack->r_timer_override = 1;
7992 rack->r_ctl.rc_agg_early = 0;
7993 } else if (IN_RECOVERY(tp->t_flags) &&
7995 (rack->r_rr_config == 3)) {
7997 * Assure we can output and we get no
7998 * remembered pace time except the retransmit.
8000 rack->r_timer_override = 1;
8001 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
8002 rack->r_ctl.rc_resend = rsm;
8004 if (IN_RECOVERY(tp->t_flags) &&
8005 (rack->rack_no_prr == 0) &&
8006 (entered_recovery == 0)) {
8007 /* Deal with PRR here (in recovery only) */
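/*
 * This follows the Proportional Rate Reduction idea (RFC 6937): while
 * pipe is still above ssthresh, sndcnt is scaled as
 * prr_delivered * ssthresh / RecoverFS, minus what we already sent
 * during recovery (prr_out); once pipe falls below ssthresh we instead
 * grow toward ssthresh, bounded by what was newly delivered plus one
 * MSS. Illustrative numbers only: with ssthresh = 10 segments worth of
 * data, prr_delivered = 4, RecoverFS = 20 and prr_out = 1, sndcnt
 * works out to 4*10/20 - 1 = 1 segment that may be sent now.
 */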
8008 uint32_t pipe, snd_una;
8010 rack->r_ctl.rc_prr_delivered += changed;
8011 /* Compute prr_sndcnt */
8012 if (SEQ_GT(tp->snd_una, th_ack)) {
8013 snd_una = tp->snd_una;
8017 pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt;
8018 if (pipe > tp->snd_ssthresh) {
8021 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh;
8022 if (rack->r_ctl.rc_prr_recovery_fs > 0)
8023 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs;
8025 rack->r_ctl.rc_prr_sndcnt = 0;
8026 rack_log_to_prr(rack, 9, 0);
8030 if (sndcnt > (long)rack->r_ctl.rc_prr_out)
8031 sndcnt -= rack->r_ctl.rc_prr_out;
8034 rack->r_ctl.rc_prr_sndcnt = sndcnt;
8035 rack_log_to_prr(rack, 10, 0);
8039 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out)
8040 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out);
8043 if (changed > limit)
8045 limit += ctf_fixed_maxseg(tp);
8046 if (tp->snd_ssthresh > pipe) {
8047 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit);
8048 rack_log_to_prr(rack, 11, 0);
8050 rack->r_ctl.rc_prr_sndcnt = min(0, limit);
8051 rack_log_to_prr(rack, 12, 0);
8054 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) &&
8055 ((rack->rc_inp->inp_in_hpts == 0) &&
8056 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) {
8058 * If you are pacing output you don't want
8062 rack->r_ctl.rc_agg_early = 0;
8063 rack->r_timer_override = 1;
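/*
 * rack_strike_dupack() -- count a duplicate ACK against the oldest
 * outstanding rsm; once the count reaches DUP_ACK_THRESHOLD we force
 * output (and a timer override) so the retransmission can go out, much
 * like classic fast retransmit.
 */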
8069 rack_strike_dupack(struct tcp_rack *rack)
8071 struct rack_sendmap *rsm;
8073 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
8074 if (rsm && (rsm->r_dupack < 0xff)) {
8076 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) {
8077 rack->r_wanted_output = 1;
8078 rack->r_timer_override = 1;
8079 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3);
8081 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3);
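/*
 * rack_check_bottom_drag() -- used when we are doing dynamic goodput
 * pacing. If everything (or all but a trivial amount) gets acked while
 * we are still waiting on the pacer, we are pacing slower than the path
 * can carry; mark that we "dragged bottom" and raise the bandwidth
 * estimate and multipliers so the pacer speeds up.
 */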
8087 rack_check_bottom_drag(struct tcpcb *tp,
8088 struct tcp_rack *rack,
8089 struct socket *so, int32_t acked)
8091 uint32_t segsiz, minseg;
8093 segsiz = ctf_fixed_maxseg(tp);
8096 if (tp->snd_max == tp->snd_una) {
8098 * We are doing dynamic pacing and we are way
8099 * under. Basically everything got acked while
8100 * we were still waiting on the pacer to expire.
8102 * This means we need to boost the b/w in
8103 * addition to any earlier boosting of
8106 rack->rc_dragged_bottom = 1;
8107 rack_validate_multipliers_at_or_above100(rack);
8109 * Lets use the segment bytes acked plus
8110 * the lowest RTT seen as the basis to
8111 * form a b/w estimate. This will be off
8112 * due to the fact that the true estimate
8113 * should be around 1/2 the time of the RTT
8114 * but we can settle for that.
8116 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) &&
8118 uint64_t bw, calc_bw, rtt;
8120 rtt = rack->r_ctl.rack_rs.rs_us_rtt;
8122 calc_bw = bw * 1000000;
8124 if (rack->r_ctl.last_max_bw &&
8125 (rack->r_ctl.last_max_bw < calc_bw)) {
8127 * If we have a last calculated max bw
8130 calc_bw = rack->r_ctl.last_max_bw;
8132 /* now plop it in */
8133 if (rack->rc_gp_filled == 0) {
8134 if (calc_bw > ONE_POINT_TWO_MEG) {
8136 * If we have no measurement
8137 * don't let us set in more than
8138 * 1.2Mbps. If we are still too
8139 * low after pacing with this we
8140 * will hopefully have a max b/w
8141 * available to sanity check things.
8143 calc_bw = ONE_POINT_TWO_MEG;
8145 rack->r_ctl.rc_rtt_diff = 0;
8146 rack->r_ctl.gp_bw = calc_bw;
8147 rack->rc_gp_filled = 1;
8148 rack->r_ctl.num_avg = RACK_REQ_AVG;
8149 rack_set_pace_segments(rack->rc_tp, rack, __LINE__);
8150 } else if (calc_bw > rack->r_ctl.gp_bw) {
8151 rack->r_ctl.rc_rtt_diff = 0;
8152 rack->r_ctl.num_avg = RACK_REQ_AVG;
8153 rack->r_ctl.gp_bw = calc_bw;
8154 rack_set_pace_segments(rack->rc_tp, rack, __LINE__);
8156 rack_increase_bw_mul(rack, -1, 0, 0, 1);
8158 * For acks over 1mss we do an extra boost to simulate
8159 * where we would get 2 acks (we want 110 for the mul).
8162 rack_increase_bw_mul(rack, -1, 0, 0, 1);
8165 * Huh, this should not be, settle
8166 * for just an old increase.
8168 rack_increase_bw_mul(rack, -1, 0, 0, 1);
8170 } else if ((IN_RECOVERY(tp->t_flags) == 0) &&
8171 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)),
8173 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) &&
8174 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) &&
8175 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <=
8176 (segsiz * rack_req_segs))) {
8178 * We are doing dynamic GP pacing and
8179 * we have everything except 1MSS or less
8180 * bytes left out. We are still pacing away.
8181 * And there is data that could be sent. This
8182 * means we are inserting delayed ack time in
8183 * our measurements because we are pacing too slowly.
8185 rack_validate_multipliers_at_or_above100(rack);
8186 rack->rc_dragged_bottom = 1;
8187 rack_increase_bw_mul(rack, -1, 0, 0, 1);
8192 * Return value of 1, we do not need to call rack_process_data().
8193 * return value of 0, rack_process_data can be called.
8194 * For ret_val, if it's 0 the TCP is locked, if it's non-zero
8195 * it's unlocked and probably unsafe to touch the TCB.
8198 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
8199 struct tcpcb *tp, struct tcpopt *to,
8200 uint32_t tiwin, int32_t tlen,
8201 int32_t * ofia, int32_t thflags, int32_t * ret_val)
8203 int32_t ourfinisacked = 0;
8204 int32_t nsegs, acked_amount;
8207 struct tcp_rack *rack;
8208 int32_t under_pacing = 0;
8209 int32_t recovery = 0;
8211 rack = (struct tcp_rack *)tp->t_fb_ptr;
8212 if (SEQ_GT(th->th_ack, tp->snd_max)) {
8213 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val);
8214 rack->r_wanted_output = 1;
8217 if (rack->rc_gp_filled &&
8218 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
8221 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
8222 if (rack->rc_in_persist)
8224 if ((th->th_ack == tp->snd_una) && (tiwin == tp->snd_wnd))
8225 rack_strike_dupack(rack);
8226 rack_log_ack(tp, to, th);
8228 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
8230 * Old ack, behind (or duplicate to) the last one rcv'd
8231 * Note: Should mark that reordering is occurring! We should also
8232 * look for sack blocks arriving e.g. ack 1, 4-4 then ack 1,
8233 * 3-3, 4-4 would be reordering. As well as ack 1, 3-3 <no
8239 * If we reach this point, ACK is not a duplicate, i.e., it ACKs
8240 * something we sent.
8242 if (tp->t_flags & TF_NEEDSYN) {
8244 * T/TCP: Connection was half-synchronized, and our SYN has
8245 * been ACK'd (so connection is now fully synchronized). Go
8246 * to non-starred state, increment snd_una for ACK of SYN,
8247 * and check if we can do window scaling.
8249 tp->t_flags &= ~TF_NEEDSYN;
8251 /* Do window scaling? */
8252 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
8253 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
8254 tp->rcv_scale = tp->request_r_scale;
8255 /* Send window already scaled. */
8258 nsegs = max(1, m->m_pkthdr.lro_nsegs);
8259 INP_WLOCK_ASSERT(tp->t_inpcb);
8261 acked = BYTES_THIS_ACK(tp, th);
8262 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
8263 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
8265 * If we just performed our first retransmit, and the ACK arrives
8266 * within our recovery window, then it was a mistake to do the
8267 * retransmit in the first place. Recover our original cwnd and
8268 * ssthresh, and proceed to transmit where we left off.
8270 if (tp->t_flags & TF_PREVVALID) {
8271 tp->t_flags &= ~TF_PREVVALID;
8272 if (tp->t_rxtshift == 1 &&
8273 (int)(ticks - tp->t_badrxtwin) < 0)
8274 rack_cong_signal(tp, th, CC_RTO_ERR);
8277 /* assure we are not backed off */
8279 rack->rc_tlp_in_progress = 0;
8280 rack->r_ctl.rc_tlp_cnt_out = 0;
8282 * If it is the RXT timer we want to
8283 * stop it, so we can restart a TLP.
8285 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
8286 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
8287 #ifdef NETFLIX_HTTP_LOGGING
8288 tcp_http_check_for_comp(rack->rc_tp, th->th_ack);
8292 * If we have a timestamp reply, update smoothed round trip time. If
8293 * no timestamp is present but transmit timer is running and timed
8294 * sequence number was acked, update smoothed round trip time. Since
8295 * we now have an rtt measurement, cancel the timer backoff (cf.,
8296 * Phil Karn's retransmit alg.). Recompute the initial retransmit
8299 * Some boxes send broken timestamp replies during the SYN+ACK
8300 * phase, ignore timestamps of 0 or we could calculate a huge RTT
8301 * and blow up the retransmit timer.
8304 * If all outstanding data is acked, stop retransmit timer and
8305 * remember to restart (more output or persist). If there is more
8306 * data to be acked, restart retransmit timer, using current
8307 * (possibly backed-off) value.
8311 *ofia = ourfinisacked;
8314 if (rack->r_ctl.rc_early_recovery) {
8315 if (IN_RECOVERY(tp->t_flags)) {
8316 if (SEQ_LT(th->th_ack, tp->snd_recover) &&
8317 (SEQ_LT(th->th_ack, tp->snd_max))) {
8318 tcp_rack_partialack(tp, th);
8320 rack_post_recovery(tp, th);
8326 * Let the congestion control algorithm update congestion control
8327 * related information. This typically means increasing the
8328 * congestion window.
8330 rack_ack_received(tp, rack, th, nsegs, CC_ACK, recovery);
8331 SOCKBUF_LOCK(&so->so_snd);
8332 acked_amount = min(acked, (int)sbavail(&so->so_snd));
8333 tp->snd_wnd -= acked_amount;
8334 mfree = sbcut_locked(&so->so_snd, acked_amount);
8335 if ((sbused(&so->so_snd) == 0) &&
8336 (acked > acked_amount) &&
8337 (tp->t_state >= TCPS_FIN_WAIT_1) &&
8338 (tp->t_flags & TF_SENTFIN)) {
8340 * We must be sure our fin
8341 * was sent and acked (we can be
8342 * in FIN_WAIT_1 without having
8347 SOCKBUF_UNLOCK(&so->so_snd);
8348 tp->t_flags |= TF_WAKESOW;
8350 if (rack->r_ctl.rc_early_recovery == 0) {
8351 if (IN_RECOVERY(tp->t_flags)) {
8352 if (SEQ_LT(th->th_ack, tp->snd_recover) &&
8353 (SEQ_LT(th->th_ack, tp->snd_max))) {
8354 tcp_rack_partialack(tp, th);
8356 rack_post_recovery(tp, th);
8360 tp->snd_una = th->th_ack;
8361 if (SEQ_GT(tp->snd_una, tp->snd_recover))
8362 tp->snd_recover = tp->snd_una;
8364 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
8365 tp->snd_nxt = tp->snd_una;
8368 (rack->use_fixed_rate == 0) &&
8369 (rack->in_probe_rtt == 0) &&
8370 rack->rc_gp_dyn_mul &&
8371 rack->rc_always_pace) {
8372 /* Check if we are dragging bottom */
8373 rack_check_bottom_drag(tp, rack, so, acked);
8375 if (tp->snd_una == tp->snd_max) {
8376 /* Nothing left outstanding */
8377 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
8378 if (rack->r_ctl.rc_went_idle_time == 0)
8379 rack->r_ctl.rc_went_idle_time = 1;
8380 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
8381 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
8383 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
8384 /* Set need output so persist might get set */
8385 rack->r_wanted_output = 1;
8386 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
8387 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
8388 (sbavail(&so->so_snd) == 0) &&
8389 (tp->t_flags2 & TF2_DROP_AF_DATA)) {
8391 * The socket was gone and the
8392 * peer sent data, time to
8396 /* tcp_close will kill the inp pre-log the Reset */
8397 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
8399 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
8404 *ofia = ourfinisacked;
8409 rack_collapsed_window(struct tcp_rack *rack)
8412 * Now we must walk the
8413 * send map and divide the
8414 * ones left stranded. These
8415 * guys can't cause us to abort
8416 * the connection and are really
8417 * "unsent". However if a buggy
8418 * client actually did keep some
8419 * of the data i.e. collapsed the win
8420 * and refused to ack and then opened
8421 * the win and acked that data. We would
8422 * get into an ack war, the simpler
8423 * method then of just pretending we
8424 * did not send those segments something
8427 struct rack_sendmap *rsm, *nrsm, fe, *insret;
8430 max_seq = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd;
8431 memset(&fe, 0, sizeof(fe));
8432 fe.r_start = max_seq;
8433 /* Find the first seq past or at maxseq */
8434 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
8436 /* Nothing to do strange */
8437 rack->rc_has_collapsed = 0;
8441 * Now do we need to split at
8442 * the collapse point?
8444 if (SEQ_GT(max_seq, rsm->r_start)) {
8445 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
8447 /* We can't get a rsm, mark all? */
8452 rack_clone_rsm(rack, nrsm, rsm, max_seq);
8453 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
8455 if (insret != NULL) {
8456 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
8457 nrsm, insret, rack, rsm);
8460 if (rsm->r_in_tmap) {
8461 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8462 nrsm->r_in_tmap = 1;
8465 * Set in the new RSM as the
8466 * collapsed starting point
8471 counter_u64_add(rack_collapsed_win, 1);
8472 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) {
8473 nrsm->r_flags |= RACK_RWND_COLLAPSED;
8474 rack->rc_has_collapsed = 1;
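/*
 * rack_un_collapse_window() -- the peer re-opened its window, so walk
 * the send map from the highest sequence backwards and clear the
 * RACK_RWND_COLLAPSED marking applied above, then note that we are no
 * longer collapsed.
 */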
8479 rack_un_collapse_window(struct tcp_rack *rack)
8481 struct rack_sendmap *rsm;
8483 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
8484 if (rsm->r_flags & RACK_RWND_COLLAPSED)
8485 rsm->r_flags &= ~RACK_RWND_COLLAPSED;
8489 rack->rc_has_collapsed = 0;
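/*
 * rack_handle_delayed_ack() -- decide between a delayed ACK and an
 * immediate ACK for in-order data. When rc_dack_mode is enabled we use
 * rc_dack_toggle so that at most every other delayable segment is
 * actually delayed; otherwise we set TF_ACKNOW and ask for output.
 */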
8493 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack,
8494 int32_t tlen, int32_t tfo_syn)
8496 if (DELAY_ACK(tp, tlen) || tfo_syn) {
8497 if (rack->rc_dack_mode &&
8499 (rack->rc_dack_toggle == 1)) {
8500 goto no_delayed_ack;
8502 rack_timer_cancel(tp, rack,
8503 rack->r_ctl.rc_rcvtime, __LINE__);
8504 tp->t_flags |= TF_DELACK;
8507 rack->r_wanted_output = 1;
8508 tp->t_flags |= TF_ACKNOW;
8509 if (rack->rc_dack_mode) {
8510 if (tp->t_flags & TF_DELACK)
8511 rack->rc_dack_toggle = 1;
8513 rack->rc_dack_toggle = 0;
8518 * Return value of 1, the TCB is unlocked and most
8519 * likely gone, return value of 0, the TCP is still
8523 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
8524 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
8525 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
8528 * Update window information. Don't look at window if no ACK: TAC's
8529 * send garbage on first SYN.
8533 struct tcp_rack *rack;
8535 rack = (struct tcp_rack *)tp->t_fb_ptr;
8536 INP_WLOCK_ASSERT(tp->t_inpcb);
8537 nsegs = max(1, m->m_pkthdr.lro_nsegs);
8538 if ((thflags & TH_ACK) &&
8539 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
8540 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
8541 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
8542 /* keep track of pure window updates */
8544 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
8545 KMOD_TCPSTAT_INC(tcps_rcvwinupd);
8546 tp->snd_wnd = tiwin;
8547 tp->snd_wl1 = th->th_seq;
8548 tp->snd_wl2 = th->th_ack;
8549 if (tp->snd_wnd > tp->max_sndwnd)
8550 tp->max_sndwnd = tp->snd_wnd;
8551 rack->r_wanted_output = 1;
8552 } else if (thflags & TH_ACK) {
8553 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) {
8554 tp->snd_wnd = tiwin;
8555 tp->snd_wl1 = th->th_seq;
8556 tp->snd_wl2 = th->th_ack;
8559 if (tp->snd_wnd < ctf_outstanding(tp))
8560 /* The peer collapsed the window */
8561 rack_collapsed_window(rack);
8562 else if (rack->rc_has_collapsed)
8563 rack_un_collapse_window(rack);
8564 /* Was persist timer active and now we have window space? */
8565 if ((rack->rc_in_persist != 0) &&
8566 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
8567 rack->r_ctl.rc_pace_min_segs))) {
8568 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime);
8569 tp->snd_nxt = tp->snd_max;
8570 /* Make sure we output to start the timer */
8571 rack->r_wanted_output = 1;
8573 /* Do we enter persists? */
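/*
 * We only drop into persist when the offered window is smaller than
 * both half the largest receive window we have observed and the pacing
 * minimum, the connection is established, nothing is in flight, and
 * there is queued data that exceeds that tiny window.
 */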
8574 if ((rack->rc_in_persist == 0) &&
8575 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
8576 TCPS_HAVEESTABLISHED(tp->t_state) &&
8577 (tp->snd_max == tp->snd_una) &&
8578 sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
8579 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
8581 * Here the rwnd is less than
8582 * the pacing size, we are established,
8583 * nothing is outstanding, and there is
8584 * data to send. Enter persists.
8586 tp->snd_nxt = tp->snd_una;
8587 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
8589 if (tp->t_flags2 & TF2_DROP_AF_DATA) {
8594 * don't process the URG bit, ignore them drag
8597 tp->rcv_up = tp->rcv_nxt;
8598 INP_WLOCK_ASSERT(tp->t_inpcb);
8601 * Process the segment text, merging it into the TCP sequencing
8602 * queue, and arranging for acknowledgment of receipt if necessary.
8603 * This process logically involves adjusting tp->rcv_wnd as data is
8604 * presented to the user (this happens in tcp_usrreq.c, case
8605 * PRU_RCVD). If a FIN has already been received on this connection
8606 * then we just ignore the text.
8608 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
8609 IS_FASTOPEN(tp->t_flags));
8610 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) &&
8611 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
8612 tcp_seq save_start = th->th_seq;
8613 tcp_seq save_rnxt = tp->rcv_nxt;
8614 int save_tlen = tlen;
8616 m_adj(m, drop_hdrlen); /* delayed header drop */
8618 * Insert segment which includes th into TCP reassembly
8619 * queue with control block tp. Set thflags to whether
8620 * reassembly now includes a segment with FIN. This handles
8621 * the common case inline (segment is the next to be
8622 * received on an established connection, and the queue is
8623 * empty), avoiding linkage into and removal from the queue
8624 * and repetition of various conversions. Set DELACK for
8625 * segments received in order, but ack immediately when
8626 * segments are out of order (so fast retransmit can work).
8628 if (th->th_seq == tp->rcv_nxt &&
8630 (TCPS_HAVEESTABLISHED(tp->t_state) ||
8632 #ifdef NETFLIX_SB_LIMITS
8633 u_int mcnt, appended;
8635 if (so->so_rcv.sb_shlim) {
8638 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
8639 CFO_NOSLEEP, NULL) == false) {
8640 counter_u64_add(tcp_sb_shlim_fails, 1);
8646 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn);
8647 tp->rcv_nxt += tlen;
8649 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
8650 (tp->t_fbyte_in == 0)) {
8651 tp->t_fbyte_in = ticks;
8652 if (tp->t_fbyte_in == 0)
8654 if (tp->t_fbyte_out && tp->t_fbyte_in)
8655 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
8657 thflags = th->th_flags & TH_FIN;
8658 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
8659 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
8660 SOCKBUF_LOCK(&so->so_rcv);
8661 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
8664 #ifdef NETFLIX_SB_LIMITS
8667 sbappendstream_locked(&so->so_rcv, m, 0);
8668 SOCKBUF_UNLOCK(&so->so_rcv);
8669 tp->t_flags |= TF_WAKESOR;
8670 #ifdef NETFLIX_SB_LIMITS
8671 if (so->so_rcv.sb_shlim && appended != mcnt)
8672 counter_fo_release(so->so_rcv.sb_shlim,
8677 * XXX: Due to the header drop above "th" is
8678 * theoretically invalid by now. Fortunately
8679 * m_adj() doesn't actually free any mbufs when
8680 * trimming from the head.
8682 tcp_seq temp = save_start;
8683 thflags = tcp_reass(tp, th, &temp, &tlen, m);
8684 tp->t_flags |= TF_ACKNOW;
8686 if ((tp->t_flags & TF_SACK_PERMIT) && (save_tlen > 0)) {
8687 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
8689 * DSACK actually handled in the fastpath
8692 RACK_OPTS_INC(tcp_sack_path_1);
8693 tcp_update_sack_list(tp, save_start,
8694 save_start + save_tlen);
8695 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
8696 if ((tp->rcv_numsacks >= 1) &&
8697 (tp->sackblks[0].end == save_start)) {
8699 * Partial overlap, recorded at todrop
8702 RACK_OPTS_INC(tcp_sack_path_2a);
8703 tcp_update_sack_list(tp,
8704 tp->sackblks[0].start,
8705 tp->sackblks[0].end);
8707 RACK_OPTS_INC(tcp_sack_path_2b);
8708 tcp_update_dsack_list(tp, save_start,
8709 save_start + save_tlen);
8711 } else if (tlen >= save_tlen) {
8712 /* Update of sackblks. */
8713 RACK_OPTS_INC(tcp_sack_path_3);
8714 tcp_update_dsack_list(tp, save_start,
8715 save_start + save_tlen);
8716 } else if (tlen > 0) {
8717 RACK_OPTS_INC(tcp_sack_path_4);
8718 tcp_update_dsack_list(tp, save_start,
8728 * If FIN is received ACK the FIN and let the user know that the
8729 * connection is closing.
8731 if (thflags & TH_FIN) {
8732 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
8734 /* The socket upcall is handled by socantrcvmore. */
8735 tp->t_flags &= ~TF_WAKESOR;
8737 * If connection is half-synchronized (ie NEEDSYN
8738 * flag on) then delay ACK, so it may be piggybacked
8739 * when SYN is sent. Otherwise, since we received a
8740 * FIN then no more input can be expected, send ACK
8743 if (tp->t_flags & TF_NEEDSYN) {
8744 rack_timer_cancel(tp, rack,
8745 rack->r_ctl.rc_rcvtime, __LINE__);
8746 tp->t_flags |= TF_DELACK;
8748 tp->t_flags |= TF_ACKNOW;
8752 switch (tp->t_state) {
8754 * In SYN_RECEIVED and ESTABLISHED STATES enter the
8757 case TCPS_SYN_RECEIVED:
8758 tp->t_starttime = ticks;
8760 case TCPS_ESTABLISHED:
8761 rack_timer_cancel(tp, rack,
8762 rack->r_ctl.rc_rcvtime, __LINE__);
8763 tcp_state_change(tp, TCPS_CLOSE_WAIT);
8767 * If still in FIN_WAIT_1 STATE FIN has not been
8768 * acked so enter the CLOSING state.
8770 case TCPS_FIN_WAIT_1:
8771 rack_timer_cancel(tp, rack,
8772 rack->r_ctl.rc_rcvtime, __LINE__);
8773 tcp_state_change(tp, TCPS_CLOSING);
8777 * In FIN_WAIT_2 state enter the TIME_WAIT state,
8778 * starting the time-wait timer, turning off the
8779 * other standard timers.
8781 case TCPS_FIN_WAIT_2:
8782 rack_timer_cancel(tp, rack,
8783 rack->r_ctl.rc_rcvtime, __LINE__);
8789 * Return any desired output.
8791 if ((tp->t_flags & TF_ACKNOW) ||
8792 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) {
8793 rack->r_wanted_output = 1;
8795 INP_WLOCK_ASSERT(tp->t_inpcb);
8800 * Here nothing is really faster, it's just that we
8801 * have broken out the fast-data path also just like
8805 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so,
8806 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
8807 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos)
8810 int32_t newsize = 0; /* automatic sockbuf scaling */
8811 struct tcp_rack *rack;
8812 #ifdef NETFLIX_SB_LIMITS
8813 u_int mcnt, appended;
8817 * The size of tcp_saveipgen must be the size of the max ip header,
8820 u_char tcp_saveipgen[IP6_HDR_LEN];
8821 struct tcphdr tcp_savetcp;
8826 * If last ACK falls within this segment's sequence numbers, record
8827 * the timestamp. NOTE that the test is modified according to the
8828 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
8830 if (__predict_false(th->th_seq != tp->rcv_nxt)) {
8833 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
8836 if (tiwin && tiwin != tp->snd_wnd) {
8839 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) {
8842 if (__predict_false((to->to_flags & TOF_TS) &&
8843 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) {
8846 if (__predict_false((th->th_ack != tp->snd_una))) {
8849 if (__predict_false(tlen > sbspace(&so->so_rcv))) {
8852 if ((to->to_flags & TOF_TS) != 0 &&
8853 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
8854 tp->ts_recent_age = tcp_ts_getticks();
8855 tp->ts_recent = to->to_tsval;
8857 rack = (struct tcp_rack *)tp->t_fb_ptr;
8859 * This is a pure, in-sequence data packet with nothing on the
8860 * reassembly queue and we have enough buffer space to take it.
8862 nsegs = max(1, m->m_pkthdr.lro_nsegs);
8864 #ifdef NETFLIX_SB_LIMITS
8865 if (so->so_rcv.sb_shlim) {
8868 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
8869 CFO_NOSLEEP, NULL) == false) {
8870 counter_u64_add(tcp_sb_shlim_fails, 1);
8876 /* Clean receiver SACK report if present */
8877 if (tp->rcv_numsacks)
8878 tcp_clean_sackreport(tp);
8879 KMOD_TCPSTAT_INC(tcps_preddat);
8880 tp->rcv_nxt += tlen;
8882 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
8883 (tp->t_fbyte_in == 0)) {
8884 tp->t_fbyte_in = ticks;
8885 if (tp->t_fbyte_in == 0)
8887 if (tp->t_fbyte_out && tp->t_fbyte_in)
8888 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
8891 * Pull snd_wl1 up to prevent seq wrap relative to th_seq.
8893 tp->snd_wl1 = th->th_seq;
8895 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt.
8897 tp->rcv_up = tp->rcv_nxt;
8898 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
8899 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
8901 if (so->so_options & SO_DEBUG)
8902 tcp_trace(TA_INPUT, ostate, tp,
8903 (void *)tcp_saveipgen, &tcp_savetcp, 0);
8905 newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
8907 /* Add data to socket buffer. */
8908 SOCKBUF_LOCK(&so->so_rcv);
8909 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
8913 * Set new socket buffer size. Give up when limit is
8917 if (!sbreserve_locked(&so->so_rcv,
8919 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
8920 m_adj(m, drop_hdrlen); /* delayed header drop */
8921 #ifdef NETFLIX_SB_LIMITS
8924 sbappendstream_locked(&so->so_rcv, m, 0);
8925 ctf_calc_rwin(so, tp);
8927 SOCKBUF_UNLOCK(&so->so_rcv);
8928 tp->t_flags |= TF_WAKESOR;
8929 #ifdef NETFLIX_SB_LIMITS
8930 if (so->so_rcv.sb_shlim && mcnt != appended)
8931 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended);
8933 rack_handle_delayed_ack(tp, rack, tlen, 0);
8934 if (tp->snd_una == tp->snd_max)
8935 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
8940 * This subfunction is used to try to highly optimize the
8941 * fast path. We again allow window updates that are
8942 * in sequence to remain in the fast-path. We also add
8943 * in the __predict's to attempt to help the compiler.
8944 * Note that if we return a 0, then we can *not* process
8945 * it and the caller should push the packet into the
8949 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
8950 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
8951 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts)
8957 * The size of tcp_saveipgen must be the size of the max ip header,
8960 u_char tcp_saveipgen[IP6_HDR_LEN];
8961 struct tcphdr tcp_savetcp;
8964 int32_t under_pacing = 0;
8965 struct tcp_rack *rack;
8967 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
8968 /* Old ack, behind (or duplicate to) the last one rcv'd */
8971 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) {
8972 /* Above what we have sent? */
8975 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
8976 /* We are retransmitting */
8979 if (__predict_false(tiwin == 0)) {
8983 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) {
8984 /* We need a SYN or a FIN, unlikely.. */
8987 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) {
8988 /* Timestamp is behind .. old ack with seq wrap? */
8991 if (__predict_false(IN_RECOVERY(tp->t_flags))) {
8992 /* Still recovering */
8995 rack = (struct tcp_rack *)tp->t_fb_ptr;
8996 if (rack->r_ctl.rc_sacked) {
8997 /* We have sack holes on our scoreboard */
9000 /* Ok if we reach here, we can process a fast-ack */
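/*
 * At this point the ACK is usable on the fast path: it is not old, not
 * beyond snd_max, we are not retransmitting or in recovery, the window
 * is non-zero, the timestamps are sane, and there are no SACK holes
 * outstanding -- so the heavyweight rack_process_ack() path can be
 * skipped.
 */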
9001 if (rack->rc_gp_filled &&
9002 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
9005 nsegs = max(1, m->m_pkthdr.lro_nsegs);
9006 rack_log_ack(tp, to, th);
9007 /* Did the window get updated? */
9008 if (tiwin != tp->snd_wnd) {
9009 tp->snd_wnd = tiwin;
9010 tp->snd_wl1 = th->th_seq;
9011 if (tp->snd_wnd > tp->max_sndwnd)
9012 tp->max_sndwnd = tp->snd_wnd;
9014 /* Do we exit persists? */
9015 if ((rack->rc_in_persist != 0) &&
9016 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
9017 rack->r_ctl.rc_pace_min_segs))) {
9018 rack_exit_persist(tp, rack, cts);
9020 /* Do we enter persists? */
9021 if ((rack->rc_in_persist == 0) &&
9022 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
9023 TCPS_HAVEESTABLISHED(tp->t_state) &&
9024 (tp->snd_max == tp->snd_una) &&
9025 sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
9026 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
9028 * Here the rwnd is less than
9029 * the pacing size, we are established,
9030 * nothing is outstanding, and there is
9031 * data to send. Enter persists.
9033 tp->snd_nxt = tp->snd_una;
9034 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
9037 * If last ACK falls within this segment's sequence numbers, record
9038 * the timestamp. NOTE that the test is modified according to the
9039 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
9041 if ((to->to_flags & TOF_TS) != 0 &&
9042 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
9043 tp->ts_recent_age = tcp_ts_getticks();
9044 tp->ts_recent = to->to_tsval;
9047 * This is a pure ack for outstanding data.
9049 KMOD_TCPSTAT_INC(tcps_predack);
9052 * "bad retransmit" recovery.
9054 if (tp->t_flags & TF_PREVVALID) {
9055 tp->t_flags &= ~TF_PREVVALID;
9056 if (tp->t_rxtshift == 1 &&
9057 (int)(ticks - tp->t_badrxtwin) < 0)
9058 rack_cong_signal(tp, th, CC_RTO_ERR);
9061 * Recalculate the transmit timer / rtt.
9063 * Some boxes send broken timestamp replies during the SYN+ACK
9064 * phase, ignore timestamps of 0 or we could calculate a huge RTT
9065 * and blow up the retransmit timer.
9067 acked = BYTES_THIS_ACK(tp, th);
9070 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
9071 hhook_run_tcp_est_in(tp, th, to);
9074 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
9075 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
9076 sbdrop(&so->so_snd, acked);
9078 /* assure we are not backed off */
9080 rack->rc_tlp_in_progress = 0;
9081 rack->r_ctl.rc_tlp_cnt_out = 0;
9083 * If it is the RXT timer we want to
9084 * stop it, so we can restart a TLP.
9086 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
9087 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
9088 #ifdef NETFLIX_HTTP_LOGGING
9089 tcp_http_check_for_comp(rack->rc_tp, th->th_ack);
9093 * Let the congestion control algorithm update congestion control
9094 * related information. This typically means increasing the
9095 * congestion window.
9097 rack_ack_received(tp, rack, th, nsegs, CC_ACK, 0);
9099 tp->snd_una = th->th_ack;
9100 if (tp->snd_wnd < ctf_outstanding(tp)) {
9101 /* The peer collapsed the window */
9102 rack_collapsed_window(rack);
9103 } else if (rack->rc_has_collapsed)
9104 rack_un_collapse_window(rack);
9107 * Pull snd_wl2 up to prevent seq wrap relative to th_ack.
9109 tp->snd_wl2 = th->th_ack;
9112 /* ND6_HINT(tp); *//* Some progress has been made. */
9115 * If all outstanding data are acked, stop retransmit timer,
9116 * otherwise restart timer using current (possibly backed-off)
9117 * value. If process is waiting for space, wakeup/selwakeup/signal.
9118 * If data are ready to send, let tcp_output decide between more
9119 * output or persist.
9122 if (so->so_options & SO_DEBUG)
9123 tcp_trace(TA_INPUT, ostate, tp,
9124 (void *)tcp_saveipgen,
9128 (rack->use_fixed_rate == 0) &&
9129 (rack->in_probe_rtt == 0) &&
9130 rack->rc_gp_dyn_mul &&
9131 rack->rc_always_pace) {
9132 /* Check if we are dragging bottom */
9133 rack_check_bottom_drag(tp, rack, so, acked);
9135 if (tp->snd_una == tp->snd_max) {
9136 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
9137 if (rack->r_ctl.rc_went_idle_time == 0)
9138 rack->r_ctl.rc_went_idle_time = 1;
9139 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
9140 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
9142 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
9144 /* Wake up the socket if we have room to write more */
9145 tp->t_flags |= TF_WAKESOW;
9146 if (sbavail(&so->so_snd)) {
9147 rack->r_wanted_output = 1;
9153 * Return value of 1, the TCB is unlocked and most
9154 * likely gone, return value of 0, the TCP is still
9158 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
9159 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
9160 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
9162 int32_t ret_val = 0;
9164 int32_t ourfinisacked = 0;
9165 struct tcp_rack *rack;
9167 ctf_calc_rwin(so, tp);
9169 * If the state is SYN_SENT: if seg contains an ACK, but not for our
9170 * SYN, drop the input. if seg contains a RST, then drop the
9171 * connection. if seg does not contain SYN, then drop it. Otherwise
9172 * this is an acceptable SYN segment initialize tp->rcv_nxt and
9173 * tp->irs if seg contains ack then advance tp->snd_una if seg
9174 * contains an ECE and ECN support is enabled, the stream is ECN
9175 * capable. if SYN has been acked change to ESTABLISHED else
9176 * SYN_RCVD state arrange for segment to be acked (eventually)
9177 * continue processing rest of data/controls.
9179 if ((thflags & TH_ACK) &&
9180 (SEQ_LEQ(th->th_ack, tp->iss) ||
9181 SEQ_GT(th->th_ack, tp->snd_max))) {
9182 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
9183 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
9186 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
9187 TCP_PROBE5(connect__refused, NULL, tp,
9188 mtod(m, const char *), tp, th);
9189 tp = tcp_drop(tp, ECONNREFUSED);
9193 if (thflags & TH_RST) {
9197 if (!(thflags & TH_SYN)) {
9201 tp->irs = th->th_seq;
9203 rack = (struct tcp_rack *)tp->t_fb_ptr;
9204 if (thflags & TH_ACK) {
9205 int tfo_partial = 0;
9207 KMOD_TCPSTAT_INC(tcps_connects);
9210 mac_socketpeer_set_from_mbuf(m, so);
9212 /* Do window scaling on this connection? */
9213 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
9214 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
9215 tp->rcv_scale = tp->request_r_scale;
9217 tp->rcv_adv += min(tp->rcv_wnd,
9218 TCP_MAXWIN << tp->rcv_scale);
9220 * If not all the data that was sent in the TFO SYN
9221 * has been acked, resend the remainder right away.
9223 if (IS_FASTOPEN(tp->t_flags) &&
9224 (tp->snd_una != tp->snd_max)) {
9225 tp->snd_nxt = th->th_ack;
9229 * If there's data, delay ACK; if there's also a FIN ACKNOW
9230 * will be turned on later.
9232 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) {
9233 rack_timer_cancel(tp, rack,
9234 rack->r_ctl.rc_rcvtime, __LINE__);
9235 tp->t_flags |= TF_DELACK;
9237 rack->r_wanted_output = 1;
9238 tp->t_flags |= TF_ACKNOW;
9239 rack->rc_dack_toggle = 0;
9241 if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) &&
9242 (V_tcp_do_ecn == 1)) {
9243 tp->t_flags2 |= TF2_ECN_PERMIT;
9244 KMOD_TCPSTAT_INC(tcps_ecn_shs);
9246 if (SEQ_GT(th->th_ack, tp->snd_una)) {
9248 * We advance snd_una for the
9249 * fast open case. If th_ack is
9250 * acknowledging data beyond
9251 * snd_una we can't just call
9252 * ack-processing since the
9253 * data stream in our send-map
9254 * will start at snd_una + 1 (one
9255 * beyond the SYN). If it's just
9256 * equal we don't need to do that
9257 * and there is no send_map.
9262 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions:
9263 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1
9265 tp->t_starttime = ticks;
9266 if (tp->t_flags & TF_NEEDFIN) {
9267 tcp_state_change(tp, TCPS_FIN_WAIT_1);
9268 tp->t_flags &= ~TF_NEEDFIN;
9271 tcp_state_change(tp, TCPS_ESTABLISHED);
9272 TCP_PROBE5(connect__established, NULL, tp,
9273 mtod(m, const char *), tp, th);
9274 rack_cc_conn_init(tp);
9278 * Received initial SYN in SYN-SENT[*] state => simultaneous
9279 * open. If segment contains CC option and there is a
9280 * cached CC, apply TAO test. If it succeeds, connection is *
9281 * half-synchronized. Otherwise, do 3-way handshake:
9282 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If
9283 * there was no CC option, clear cached CC value.
9285 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
9286 tcp_state_change(tp, TCPS_SYN_RECEIVED);
9288 INP_WLOCK_ASSERT(tp->t_inpcb);
9290 * Advance th->th_seq to correspond to first data byte. If data,
9291 * trim to stay within window, dropping FIN if necessary.
9294 if (tlen > tp->rcv_wnd) {
9295 todrop = tlen - tp->rcv_wnd;
9299 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin);
9300 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
9302 tp->snd_wl1 = th->th_seq - 1;
9303 tp->rcv_up = th->th_seq;
9305 * Client side of transaction: already sent SYN and data. If the
9306 * remote host used T/TCP to validate the SYN, our data will be
9307 * ACK'd; if so, enter normal data segment processing in the middle
9308 * of step 5, ack processing. Otherwise, goto step 6.
9310 if (thflags & TH_ACK) {
9311 /* For syn-sent we need to possibly update the rtt */
9312 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
9315 t = tcp_ts_getticks() - to->to_tsecr;
9316 if (!tp->t_rttlow || tp->t_rttlow > t)
9318 tcp_rack_xmit_timer(rack, t + 1, 1, (t * HPTS_USEC_IN_MSEC), 0, NULL, 2);
9319 tcp_rack_xmit_timer_commit(rack, tp);
9321 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val))
9323 /* We may have changed to FIN_WAIT_1 above */
9324 if (tp->t_state == TCPS_FIN_WAIT_1) {
9326 * In FIN_WAIT_1 STATE in addition to the processing
9327 * for the ESTABLISHED state if our FIN is now
9328 * acknowledged then enter FIN_WAIT_2.
9330 if (ourfinisacked) {
9332 * If we can't receive any more data, then
9333 * closing user can proceed. Starting the
9334 * timer is contrary to the specification,
9335 * but if we don't get a FIN we'll hang
9338 * XXXjl: we should release the tp also, and
9339 * use a compressed state.
9341 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
9342 soisdisconnected(so);
9343 tcp_timer_activate(tp, TT_2MSL,
9344 (tcp_fast_finwait2_recycle ?
9345 tcp_finwait2_timeout :
9348 tcp_state_change(tp, TCPS_FIN_WAIT_2);
9352 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
9353 tiwin, thflags, nxt_pkt));
9357 * Return value of 1, the TCB is unlocked and most
9358 * likely gone, return value of 0, the TCP is still
9362 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
9363 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
9364 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
9366 struct tcp_rack *rack;
9367 int32_t ret_val = 0;
9368 int32_t ourfinisacked = 0;
9370 ctf_calc_rwin(so, tp);
9371 if ((thflags & TH_ACK) &&
9372 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
9373 SEQ_GT(th->th_ack, tp->snd_max))) {
9374 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
9375 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
9378 rack = (struct tcp_rack *)tp->t_fb_ptr;
9379 if (IS_FASTOPEN(tp->t_flags)) {
9381 * When a TFO connection is in SYN_RECEIVED, the
9382 * only valid packets are the initial SYN, a
9383 * retransmit/copy of the initial SYN (possibly with
9384 * a subset of the original data), a valid ACK, a
9387 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
9388 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
9389 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
9391 } else if (thflags & TH_SYN) {
9392 /* non-initial SYN is ignored */
9393 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) ||
9394 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) ||
9395 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) {
9396 ctf_do_drop(m, NULL);
9399 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) {
9400 ctf_do_drop(m, NULL);
9404 if ((thflags & TH_RST) ||
9405 (tp->t_fin_is_rst && (thflags & TH_FIN)))
9406 return (ctf_process_rst(m, th, so, tp));
9408 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
9409 * it's less than ts_recent, drop it.
9411 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
9412 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
9413 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
9417 * In the SYN-RECEIVED state, validate that the packet belongs to
9418 * this connection before trimming the data to fit the receive
9419 * window. Check the sequence number versus IRS since we know the
9420 * sequence numbers haven't wrapped. This is a partial fix for the
9421 * "LAND" DoS attack.
9423 if (SEQ_LT(th->th_seq, tp->irs)) {
9424 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
9425 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
9428 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
9432 * If last ACK falls within this segment's sequence numbers, record
9433 * its timestamp. NOTE: 1) That the test incorporates suggestions
9434 * from the latest proposal of the tcplw@cray.com list (Braden
9435 * 1993/04/26). 2) That updating only on newer timestamps interferes
9436 * with our earlier PAWS tests, so this check should be solely
9437 * predicated on the sequence space of this segment. 3) That we
9438 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
9439 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
9440 * SEG.Len. This modified check allows us to overcome RFC1323's
9441 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
9442 * p.869. In such cases, we can still calculate the RTT correctly
9443 * when RCV.NXT == Last.ACK.Sent.
9445 if ((to->to_flags & TOF_TS) != 0 &&
9446 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
9447 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
9448 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
9449 tp->ts_recent_age = tcp_ts_getticks();
9450 tp->ts_recent = to->to_tsval;
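/*
 * For example, a pure ACK with SEG.SEQ == Last.ACK.Sent and
 * SEG.Len == 0 satisfies both SEQ_LEQ() tests above with equality,
 * so ts_recent is still refreshed and the echoed timestamp remains
 * usable for RTT, where RFC1323's strict '<' comparison would have
 * skipped the update.
 */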
9452 tp->snd_wnd = tiwin;
9454 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
9455 * is on (half-synchronized state), then queue data for later
9456 * processing; else drop segment and return.
9458 if ((thflags & TH_ACK) == 0) {
9459 if (IS_FASTOPEN(tp->t_flags)) {
9460 rack_cc_conn_init(tp);
9462 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
9463 tiwin, thflags, nxt_pkt));
9465 KMOD_TCPSTAT_INC(tcps_connects);
9467 /* Do window scaling? */
9468 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
9469 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
9470 tp->rcv_scale = tp->request_r_scale;
9473 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> FIN-WAIT-1
9476 tp->t_starttime = ticks;
9477 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) {
9478 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
9479 tp->t_tfo_pending = NULL;
9481 if (tp->t_flags & TF_NEEDFIN) {
9482 tcp_state_change(tp, TCPS_FIN_WAIT_1);
9483 tp->t_flags &= ~TF_NEEDFIN;
9485 tcp_state_change(tp, TCPS_ESTABLISHED);
9486 TCP_PROBE5(accept__established, NULL, tp,
9487 mtod(m, const char *), tp, th);
9489 * TFO connections call cc_conn_init() during SYN
9490 * processing. Calling it again here for such connections
9491 * is not harmless as it would undo the snd_cwnd reduction
9492 * that occurs when a TFO SYN|ACK is retransmitted.
9494 if (!IS_FASTOPEN(tp->t_flags))
9495 rack_cc_conn_init(tp);
9498 * Account for the ACK of our SYN prior to
9499 * regular ACK processing below, except for
9500 * simultaneous SYN, which is handled later.
9502 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN))
9505 * If segment contains data or ACK, will call tcp_reass() later; if
9506 * not, do so now to pass queued data to user.
9508 if (tlen == 0 && (thflags & TH_FIN) == 0)
9509 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0,
9511 tp->snd_wl1 = th->th_seq - 1;
9512 /* For syn-recv we need to possibly update the rtt */
9513 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
9516 t = tcp_ts_getticks() - to->to_tsecr;
9517 if (!tp->t_rttlow || tp->t_rttlow > t)
9519 tcp_rack_xmit_timer(rack, t + 1, 1, (t * HPTS_USEC_IN_MSEC), 0, NULL, 2);
9520 tcp_rack_xmit_timer_commit(rack, tp);
9522 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
9525 if (tp->t_state == TCPS_FIN_WAIT_1) {
9526 /* We could have gone to FIN_WAIT_1 (or EST) above */
9528 * In FIN_WAIT_1 STATE in addition to the processing for the
9529 * ESTABLISHED state if our FIN is now acknowledged then
9532 if (ourfinisacked) {
9534 * If we can't receive any more data, then closing
9535 * user can proceed. Starting the timer is contrary
9536 * to the specification, but if we don't get a FIN
9537 * we'll hang forever.
9539 * XXXjl: we should release the tp also, and use a
9542 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
9543 soisdisconnected(so);
9544 tcp_timer_activate(tp, TT_2MSL,
9545 (tcp_fast_finwait2_recycle ?
9546 tcp_finwait2_timeout :
9549 tcp_state_change(tp, TCPS_FIN_WAIT_2);
9552 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
9553 tiwin, thflags, nxt_pkt));
9557 * Return value of 1, the TCB is unlocked and most
9558 * likely gone, return value of 0, the TCP is still
9562 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
9563 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
9564 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
9566 int32_t ret_val = 0;
9567 struct tcp_rack *rack;
9570 * Header prediction: check for the two common cases of a
9571 * uni-directional data xfer. If the packet has no control flags,
9572 * is in-sequence, the window didn't change and we're not
9573 * retransmitting, it's a candidate. If the length is zero and the
9574 * ack moved forward, we're the sender side of the xfer. Just free
9575 * the data acked & wake any higher level process that was blocked
9576 * waiting for space. If the length is non-zero and the ack didn't
9577 * move, we're the receiver side. If we're getting packets in-order
9578 * (the reassembly queue is empty), add the data to the socket
9579 * buffer and note that we need a delayed ack. Make sure that the
9580 * hidden state-flags are also off. Since we check for
9581 * TCPS_ESTABLISHED first, it can only be TF_NEEDSYN.
9583 rack = (struct tcp_rack *)tp->t_fb_ptr;
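/*
 * Fast-path gate: no SACK blocks in the options, a bare ACK (no
 * SYN/FIN/RST), an empty reassembly queue and an in-sequence segment.
 * Pure ACKs are handled by rack_fastack() and in-order data by
 * rack_do_fastnewdata(); anything else falls through to the slow
 * path below.
 */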
9584 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) &&
9585 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) &&
9586 __predict_true(SEGQ_EMPTY(tp)) &&
9587 __predict_true(th->th_seq == tp->rcv_nxt)) {
9589 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen,
9590 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) {
9594 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen,
9595 tiwin, nxt_pkt, iptos)) {
9600 ctf_calc_rwin(so, tp);
9602 if ((thflags & TH_RST) ||
9603 (tp->t_fin_is_rst && (thflags & TH_FIN)))
9604 return (ctf_process_rst(m, th, so, tp));
9607 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
9608 * synchronized state.
9610 if (thflags & TH_SYN) {
9611 ctf_challenge_ack(m, th, tp, &ret_val);
9615 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
9616 * it's less than ts_recent, drop it.
9618 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
9619 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
9620 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
9623 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
9627 * If last ACK falls within this segment's sequence numbers, record
9628 * its timestamp. NOTE: 1) That the test incorporates suggestions
9629 * from the latest proposal of the tcplw@cray.com list (Braden
9630 * 1993/04/26). 2) That updating only on newer timestamps interferes
9631 * with our earlier PAWS tests, so this check should be solely
9632 * predicated on the sequence space of this segment. 3) That we
9633 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
9634 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
9635 * SEG.Len. This modified check allows us to overcome RFC1323's
9636 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
9637 * p.869. In such cases, we can still calculate the RTT correctly
9638 * when RCV.NXT == Last.ACK.Sent.
9640 if ((to->to_flags & TOF_TS) != 0 &&
9641 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
9642 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
9643 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
9644 tp->ts_recent_age = tcp_ts_getticks();
9645 tp->ts_recent = to->to_tsval;
9648 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
9649 * is on (half-synchronized state), then queue data for later
9650 * processing; else drop segment and return.
9652 if ((thflags & TH_ACK) == 0) {
9653 if (tp->t_flags & TF_NEEDSYN) {
9654 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
9655 tiwin, thflags, nxt_pkt));
9657 } else if (tp->t_flags & TF_ACKNOW) {
9658 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
9659 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
9662 ctf_do_drop(m, NULL);
9669 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
9672 if (sbavail(&so->so_snd)) {
9673 if (ctf_progress_timeout_check(tp, true)) {
9674 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
9675 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
9676 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
9680 /* State changes only happen in rack_process_data() */
9681 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
9682 tiwin, thflags, nxt_pkt));
9686 * Return value of 1, the TCB is unlocked and most
9687 * likely gone, return value of 0, the TCP is still
9691 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
9692 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
9693 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
9695 int32_t ret_val = 0;
9697 ctf_calc_rwin(so, tp);
9698 if ((thflags & TH_RST) ||
9699 (tp->t_fin_is_rst && (thflags & TH_FIN)))
9700 return (ctf_process_rst(m, th, so, tp));
9702 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
9703 * synchronized state.
9705 if (thflags & TH_SYN) {
9706 ctf_challenge_ack(m, th, tp, &ret_val);
9710 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
9711 * it's less than ts_recent, drop it.
9713 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
9714 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
9715 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
9718 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
9722 * If last ACK falls within this segment's sequence numbers, record
9723 * its timestamp. NOTE: 1) That the test incorporates suggestions
9724 * from the latest proposal of the tcplw@cray.com list (Braden
9725 * 1993/04/26). 2) That updating only on newer timestamps interferes
9726 * with our earlier PAWS tests, so this check should be solely
9727 * predicated on the sequence space of this segment. 3) That we
9728 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
9729 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
9730 * SEG.Len. This modified check allows us to overcome RFC1323's
9731 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
9732 * p.869. In such cases, we can still calculate the RTT correctly
9733 * when RCV.NXT == Last.ACK.Sent.
9735 if ((to->to_flags & TOF_TS) != 0 &&
9736 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
9737 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
9738 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
9739 tp->ts_recent_age = tcp_ts_getticks();
9740 tp->ts_recent = to->to_tsval;
9743 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
9744 * is on (half-synchronized state), then queue data for later
9745 * processing; else drop segment and return.
9747 if ((thflags & TH_ACK) == 0) {
9748 if (tp->t_flags & TF_NEEDSYN) {
9749 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
9750 tiwin, thflags, nxt_pkt));
9752 } else if (tp->t_flags & TF_ACKNOW) {
9753 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
9754 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
9757 ctf_do_drop(m, NULL);
9764 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
9767 if (sbavail(&so->so_snd)) {
9768 if (ctf_progress_timeout_check(tp, true)) {
9769 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
9770 tp, tick, PROGRESS_DROP, __LINE__);
9771 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
9772 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
9776 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
9777 tiwin, thflags, nxt_pkt));
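/*
 * Handle data that arrives after the user has closed the socket.  When
 * rc_allow_data_af_clo is clear the segment is answered with a reset;
 * otherwise the data is ignored (rcv_nxt is advanced and
 * TF2_DROP_AF_DATA is set) and output is requested so a follow-up
 * reset can go out.
 */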
9781 rack_check_data_after_close(struct mbuf *m,
9782 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so)
9784 struct tcp_rack *rack;
9786 rack = (struct tcp_rack *)tp->t_fb_ptr;
9787 if (rack->rc_allow_data_af_clo == 0) {
9789 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
9790 /* tcp_close will kill the inp pre-log the Reset */
9791 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
9793 KMOD_TCPSTAT_INC(tcps_rcvafterclose);
9794 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen));
9797 if (sbavail(&so->so_snd) == 0)
9799 /* Ok we allow data that is ignored and a followup reset */
9800 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
9801 tp->rcv_nxt = th->th_seq + *tlen;
9802 tp->t_flags2 |= TF2_DROP_AF_DATA;
9803 rack->r_wanted_output = 1;
9809 * Return value of 1, the TCB is unlocked and most
9810 * likely gone, return value of 0, the TCP is still
9814 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
9815 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
9816 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
9818 int32_t ret_val = 0;
9819 int32_t ourfinisacked = 0;
9821 ctf_calc_rwin(so, tp);
9823 if ((thflags & TH_RST) ||
9824 (tp->t_fin_is_rst && (thflags & TH_FIN)))
9825 return (ctf_process_rst(m, th, so, tp));
9827 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
9828 * synchronized state.
9830 if (thflags & TH_SYN) {
9831 ctf_challenge_ack(m, th, tp, &ret_val);
9835 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
9836 * it's less than ts_recent, drop it.
9838 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
9839 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
9840 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
9843 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
9847 * If new data are received on a connection after the user processes
9848 * are gone, then RST the other end.
9850 if ((so->so_state & SS_NOFDREF) && tlen) {
9851 if (rack_check_data_after_close(m, tp, &tlen, th, so))
9855 * If last ACK falls within this segment's sequence numbers, record
9856 * its timestamp. NOTE: 1) That the test incorporates suggestions
9857 * from the latest proposal of the tcplw@cray.com list (Braden
9858 * 1993/04/26). 2) That updating only on newer timestamps interferes
9859 * with our earlier PAWS tests, so this check should be solely
9860 * predicated on the sequence space of this segment. 3) That we
9861 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
9862 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
9863 * SEG.Len. This modified check allows us to overcome RFC1323's
9864 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
9865 * p.869. In such cases, we can still calculate the RTT correctly
9866 * when RCV.NXT == Last.ACK.Sent.
9868 if ((to->to_flags & TOF_TS) != 0 &&
9869 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
9870 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
9871 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
9872 tp->ts_recent_age = tcp_ts_getticks();
9873 tp->ts_recent = to->to_tsval;
9876 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
9877 * is on (half-synchronized state), then queue data for later
9878 * processing; else drop segment and return.
9880 if ((thflags & TH_ACK) == 0) {
9881 if (tp->t_flags & TF_NEEDSYN) {
9882 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
9883 tiwin, thflags, nxt_pkt));
9884 } else if (tp->t_flags & TF_ACKNOW) {
9885 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
9886 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
9889 ctf_do_drop(m, NULL);
9896 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
9899 if (ourfinisacked) {
9901 * If we can't receive any more data, then closing user can
9902 * proceed. Starting the timer is contrary to the
9903 * specification, but if we don't get a FIN we'll hang
9906 * XXXjl: we should release the tp also, and use a
9909 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
9910 soisdisconnected(so);
9911 tcp_timer_activate(tp, TT_2MSL,
9912 (tcp_fast_finwait2_recycle ?
9913 tcp_finwait2_timeout :
9916 tcp_state_change(tp, TCPS_FIN_WAIT_2);
9918 if (sbavail(&so->so_snd)) {
9919 if (ctf_progress_timeout_check(tp, true)) {
9920 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
9921 tp, tick, PROGRESS_DROP, __LINE__);
9922 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
9923 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
9927 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
9928 tiwin, thflags, nxt_pkt));
9932 * Return value of 1, the TCB is unlocked and most
9933 * likely gone, return value of 0, the TCP is still
9937 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
9938 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
9939 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
9941 int32_t ret_val = 0;
9942 int32_t ourfinisacked = 0;
9944 ctf_calc_rwin(so, tp);
9946 if ((thflags & TH_RST) ||
9947 (tp->t_fin_is_rst && (thflags & TH_FIN)))
9948 return (ctf_process_rst(m, th, so, tp));
9950 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
9951 * synchronized state.
9953 if (thflags & TH_SYN) {
9954 ctf_challenge_ack(m, th, tp, &ret_val);
9958 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
9959 * it's less than ts_recent, drop it.
9961 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
9962 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
9963 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
9966 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
9970 * If new data are received on a connection after the user processes
9971 * are gone, then RST the other end.
9973 if ((so->so_state & SS_NOFDREF) && tlen) {
9974 if (rack_check_data_after_close(m, tp, &tlen, th, so))
9978 * If last ACK falls within this segment's sequence numbers, record
9979 * its timestamp. NOTE: 1) That the test incorporates suggestions
9980 * from the latest proposal of the tcplw@cray.com list (Braden
9981 * 1993/04/26). 2) That updating only on newer timestamps interferes
9982 * with our earlier PAWS tests, so this check should be solely
9983 * predicated on the sequence space of this segment. 3) That we
9984 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
9985 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
9986 * SEG.Len. This modified check allows us to overcome RFC1323's
9987 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
9988 * p.869. In such cases, we can still calculate the RTT correctly
9989 * when RCV.NXT == Last.ACK.Sent.
9991 if ((to->to_flags & TOF_TS) != 0 &&
9992 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
9993 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
9994 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
9995 tp->ts_recent_age = tcp_ts_getticks();
9996 tp->ts_recent = to->to_tsval;
9999 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
10000 * is on (half-synchronized state), then queue data for later
10001 * processing; else drop segment and return.
10003 if ((thflags & TH_ACK) == 0) {
10004 if (tp->t_flags & TF_NEEDSYN) {
10005 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
10006 tiwin, thflags, nxt_pkt));
10007 } else if (tp->t_flags & TF_ACKNOW) {
10008 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
10009 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
10012 ctf_do_drop(m, NULL);
10019 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
10022 if (ourfinisacked) {
10027 if (sbavail(&so->so_snd)) {
10028 if (ctf_progress_timeout_check(tp, true)) {
10029 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
10030 tp, tick, PROGRESS_DROP, __LINE__);
10031 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
10032 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10036 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
10037 tiwin, thflags, nxt_pkt));
10041 * Return value of 1, the TCB is unlocked and most
10042 * likely gone, return value of 0, the TCP is still
10046 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
10047 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10048 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
10050 int32_t ret_val = 0;
10051 int32_t ourfinisacked = 0;
10053 ctf_calc_rwin(so, tp);
10055 if ((thflags & TH_RST) ||
10056 (tp->t_fin_is_rst && (thflags & TH_FIN)))
10057 return (ctf_process_rst(m, th, so, tp));
10059 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
10060 * synchronized state.
10062 if (thflags & TH_SYN) {
10063 ctf_challenge_ack(m, th, tp, &ret_val);
10067 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
10068 * it's less than ts_recent, drop it.
10070 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
10071 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
10072 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
10075 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
10079 * If new data are received on a connection after the user processes
10080 * are gone, then RST the other end.
10082 if ((so->so_state & SS_NOFDREF) && tlen) {
10083 if (rack_check_data_after_close(m, tp, &tlen, th, so))
10087 * If last ACK falls within this segment's sequence numbers, record
10088 * its timestamp. NOTE: 1) That the test incorporates suggestions
10089 * from the latest proposal of the tcplw@cray.com list (Braden
10090 * 1993/04/26). 2) That updating only on newer timestamps interferes
10091 * with our earlier PAWS tests, so this check should be solely
10092 * predicated on the sequence space of this segment. 3) That we
10093 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
10094 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
10095 * SEG.Len. This modified check allows us to overcome RFC1323's
10096 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
10097 * p.869. In such cases, we can still calculate the RTT correctly
10098 * when RCV.NXT == Last.ACK.Sent.
10100 if ((to->to_flags & TOF_TS) != 0 &&
10101 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
10102 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
10103 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
10104 tp->ts_recent_age = tcp_ts_getticks();
10105 tp->ts_recent = to->to_tsval;
10108 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
10109 * is on (half-synchronized state), then queue data for later
10110 * processing; else drop segment and return.
10112 if ((thflags & TH_ACK) == 0) {
10113 if (tp->t_flags & TF_NEEDSYN) {
10114 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
10115 tiwin, thflags, nxt_pkt));
10116 } else if (tp->t_flags & TF_ACKNOW) {
10117 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
10118 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
10121 ctf_do_drop(m, NULL);
10126 * case TCPS_LAST_ACK: Ack processing.
10128 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
10131 if (ourfinisacked) {
10132 tp = tcp_close(tp);
10133 ctf_do_drop(m, tp);
10136 if (sbavail(&so->so_snd)) {
10137 if (ctf_progress_timeout_check(tp, true)) {
10138 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
10139 tp, tick, PROGRESS_DROP, __LINE__);
10140 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
10141 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10145 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
10146 tiwin, thflags, nxt_pkt));
10150 * Return value of 1, the TCB is unlocked and most
10151 * likely gone, return value of 0, the TCP is still
10155 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
10156 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10157 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
10159 int32_t ret_val = 0;
10160 int32_t ourfinisacked = 0;
10162 ctf_calc_rwin(so, tp);
10164 /* Reset receive buffer auto scaling when not in bulk receive mode. */
10165 if ((thflags & TH_RST) ||
10166 (tp->t_fin_is_rst && (thflags & TH_FIN)))
10167 return (ctf_process_rst(m, th, so, tp));
10169 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
10170 * synchronized state.
10172 if (thflags & TH_SYN) {
10173 ctf_challenge_ack(m, th, tp, &ret_val);
10177 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
10178 * it's less than ts_recent, drop it.
10180 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
10181 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
10182 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
10185 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
10189 * If new data are received on a connection after the user processes
10190 * are gone, then RST the other end.
10192 if ((so->so_state & SS_NOFDREF) &&
10194 if (rack_check_data_after_close(m, tp, &tlen, th, so))
10198 * If last ACK falls within this segment's sequence numbers, record
10199 * its timestamp. NOTE: 1) That the test incorporates suggestions
10200 * from the latest proposal of the tcplw@cray.com list (Braden
10201 * 1993/04/26). 2) That updating only on newer timestamps interferes
10202 * with our earlier PAWS tests, so this check should be solely
10203 * predicated on the sequence space of this segment. 3) That we
10204 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
10205 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
10206 * SEG.Len, This modified check allows us to overcome RFC1323's
10207 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
10208 * p.869. In such cases, we can still calculate the RTT correctly
10209 * when RCV.NXT == Last.ACK.Sent.
10211 if ((to->to_flags & TOF_TS) != 0 &&
10212 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
10213 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
10214 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
10215 tp->ts_recent_age = tcp_ts_getticks();
10216 tp->ts_recent = to->to_tsval;
10219 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
10220 * is on (half-synchronized state), then queue data for later
10221 * processing; else drop segment and return.
10223 if ((thflags & TH_ACK) == 0) {
10224 if (tp->t_flags & TF_NEEDSYN) {
10225 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
10226 tiwin, thflags, nxt_pkt));
10227 } else if (tp->t_flags & TF_ACKNOW) {
10228 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
10229 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
10232 ctf_do_drop(m, NULL);
10239 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
10242 if (sbavail(&so->so_snd)) {
10243 if (ctf_progress_timeout_check(tp, true)) {
10244 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
10245 tp, tick, PROGRESS_DROP, __LINE__);
10246 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
10247 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10251 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
10252 tiwin, thflags, nxt_pkt));
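/* Reset the rack RTT rate-sample state back to empty. */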
10256 rack_clear_rate_sample(struct tcp_rack *rack)
10258 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY;
10259 rack->r_ctl.rack_rs.rs_rtt_cnt = 0;
10260 rack->r_ctl.rack_rs.rs_rtt_tot = 0;
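/*
 * Recompute the pacing segment bounds.  rc_pace_min_segs tracks the
 * fixed maximum segment size; rc_pace_max_segs is taken from the user
 * setting when a fixed rate or forced segment count is in use, from
 * tcp_get_pacing_burst_size()/rack_get_pacing_len() when we have a
 * bandwidth estimate, or from the initial window when we know nothing
 * yet, and is always clamped to PACE_MAX_IP_BYTES.
 */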
10264 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line)
10266 uint64_t bw_est, rate_wanted;
10270 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs;
10271 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs)
10273 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp);
10274 if (rack->use_fixed_rate || rack->rc_force_max_seg) {
10275 if (user_max != rack->r_ctl.rc_pace_max_segs)
10278 if (rack->rc_force_max_seg) {
10279 rack->r_ctl.rc_pace_max_segs = user_max;
10280 } else if (rack->use_fixed_rate) {
10281 bw_est = rack_get_bw(rack);
10282 if ((rack->r_ctl.crte == NULL) ||
10283 (bw_est != rack->r_ctl.crte->rate)) {
10284 rack->r_ctl.rc_pace_max_segs = user_max;
10286 /* We are pacing right at the hardware rate */
10289 segsiz = min(ctf_fixed_maxseg(tp),
10290 rack->r_ctl.rc_pace_min_segs);
10291 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(
10292 tp, bw_est, segsiz, 0,
10293 rack->r_ctl.crte, NULL);
10295 } else if (rack->rc_always_pace) {
10296 if (rack->r_ctl.gp_bw ||
10297 #ifdef NETFLIX_PEAKRATE
10298 rack->rc_tp->t_maxpeakrate ||
10300 rack->r_ctl.init_rate) {
10301 /* We have a rate of some sort set */
10304 bw_est = rack_get_bw(rack);
10305 orig = rack->r_ctl.rc_pace_max_segs;
10306 rate_wanted = rack_get_output_bw(rack, bw_est, NULL);
10308 /* We have something */
10309 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack,
10311 ctf_fixed_maxseg(rack->rc_tp));
10313 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs;
10314 if (orig != rack->r_ctl.rc_pace_max_segs)
10316 } else if ((rack->r_ctl.gp_bw == 0) &&
10317 (rack->r_ctl.rc_pace_max_segs == 0)) {
10319 * If we have nothing, limit us to bursting
10320 * out IW sized pieces.
10323 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack);
10326 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) {
10328 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES;
10331 rack_log_type_hrdwtso(tp, rack, 0, rack->rc_inp->inp_socket->so_snd.sb_flags, line, 2);
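/*
 * Attach the rack stack to a connection: allocate and zero the
 * per-connection rack state, seed it from the module defaults, rebuild
 * a send map entry for any data that is already outstanding, bump cwnd
 * up to the initial window and leave ssthresh unlimited while we are
 * still inside the IW, stop the default-stack timers and start the
 * hpts timer.
 */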
10335 rack_init(struct tcpcb *tp)
10337 struct tcp_rack *rack = NULL;
10338 struct rack_sendmap *insret;
10339 uint32_t iwin, snt, us_cts;
10341 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT);
10342 if (tp->t_fb_ptr == NULL) {
10344 * We need to allocate memory but can't. The INP and INP_INFO
10345 * locks are held and they are recursive (this happens during
10346 * setup), so a scheme to drop the locks fails. :(
10351 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack));
10353 rack = (struct tcp_rack *)tp->t_fb_ptr;
10354 RB_INIT(&rack->r_ctl.rc_mtree);
10355 TAILQ_INIT(&rack->r_ctl.rc_free);
10356 TAILQ_INIT(&rack->r_ctl.rc_tmap);
10359 rack->rc_inp = tp->t_inpcb;
10361 /* Probably not needed but let's be sure */
10362 rack_clear_rate_sample(rack);
10363 rack->r_ctl.rc_reorder_fade = rack_reorder_fade;
10364 rack->rc_allow_data_af_clo = rack_ignore_data_after_close;
10365 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh;
10367 rack->use_rack_rr = 1;
10368 if (V_tcp_delack_enabled)
10369 tp->t_delayed_ack = 1;
10371 tp->t_delayed_ack = 0;
10372 if (rack_enable_shared_cwnd)
10373 rack->rack_enable_scwnd = 1;
10374 rack->rc_user_set_max_segs = rack_hptsi_segments;
10375 rack->rc_force_max_seg = 0;
10376 if (rack_use_imac_dack)
10377 rack->rc_dack_mode = 1;
10378 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh;
10379 rack->r_ctl.rc_pkt_delay = rack_pkt_delay;
10380 rack->r_ctl.rc_prop_reduce = rack_use_proportional_reduce;
10381 rack->r_ctl.rc_prop_rate = rack_proportional_rate;
10382 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp;
10383 rack->r_ctl.rc_early_recovery = rack_early_recovery;
10384 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff;
10385 rack->r_ctl.rc_highest_us_rtt = 0;
10386 if (rack_disable_prr)
10387 rack->rack_no_prr = 1;
10388 if (rack_gp_no_rec_chg)
10389 rack->rc_gp_no_rec_chg = 1;
10390 rack->rc_always_pace = rack_pace_every_seg;
10391 if (rack_enable_mqueue_for_nonpaced)
10392 rack->r_mbuf_queue = 1;
10394 rack->r_mbuf_queue = 0;
10395 if (rack->r_mbuf_queue || rack->rc_always_pace)
10396 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
10398 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
10399 rack_set_pace_segments(tp, rack, __LINE__);
10400 if (rack_limits_scwnd)
10401 rack->r_limit_scw = 1;
10403 rack->r_limit_scw = 0;
10404 rack->r_ctl.rc_high_rwnd = tp->snd_wnd;
10405 rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
10406 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method;
10407 rack->rack_tlp_threshold_use = rack_tlp_threshold_use;
10408 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr;
10409 rack->r_ctl.rc_min_to = rack_min_to;
10410 microuptime(&rack->r_ctl.act_rcv_time);
10411 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
10412 rack->r_running_late = 0;
10413 rack->r_running_early = 0;
10414 rack->rc_init_win = rack_default_init_window;
10415 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss;
10416 if (rack_do_dyn_mul) {
10417 /* When dynamic adjustment is on, CA needs to start at 100% */
10418 rack->rc_gp_dyn_mul = 1;
10419 if (rack_do_dyn_mul >= 100)
10420 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
10422 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
10423 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec;
10424 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
10425 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
10426 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN,
10427 rack_probertt_filter_life);
10428 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
10429 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
10430 rack->r_ctl.rc_time_of_last_probertt = us_cts;
10431 rack->r_ctl.rc_time_probertt_starts = 0;
10432 /* Do we force on detection? */
10433 #ifdef NETFLIX_EXP_DETECTION
10434 if (tcp_force_detection)
10435 rack->do_detection = 1;
10438 rack->do_detection = 0;
10439 if (rack_non_rxt_use_cr)
10440 rack->rack_rec_nonrxt_use_cr = 1;
10441 if (tp->snd_una != tp->snd_max) {
10442 /* Create a send map for the current outstanding data */
10443 struct rack_sendmap *rsm;
10445 rsm = rack_alloc(rack);
10447 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
10448 tp->t_fb_ptr = NULL;
10451 rsm->r_flags = RACK_OVERMAX;
10452 rsm->r_tim_lastsent[0] = rack->r_ctl.rc_tlp_rxt_last_time;
10453 rsm->r_rtr_cnt = 1;
10454 rsm->r_rtr_bytes = 0;
10455 rsm->r_start = tp->snd_una;
10456 if (tp->t_flags & TF_SENTFIN) {
10457 rsm->r_end = tp->snd_max - 1;
10458 rsm->r_flags |= RACK_HAS_FIN;
10460 rsm->r_end = tp->snd_max;
10462 rsm->usec_orig_send = us_cts;
10464 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
10466 if (insret != NULL) {
10467 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p",
10468 insret, rack, rsm);
10471 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
10472 rsm->r_in_tmap = 1;
10474 /* Cancel the GP measurement in progress */
10475 tp->t_flags &= ~TF_GPUTINPROG;
10476 if (SEQ_GT(tp->snd_max, tp->iss))
10477 snt = tp->snd_max - tp->iss;
10480 iwin = rc_init_window(rack);
10482 /* We are not past the initial window
10483 * so we need to make sure cwnd is
10486 if (tp->snd_cwnd < iwin)
10487 tp->snd_cwnd = iwin;
10489 * If we are within the initial window
10490 * we want ssthresh to be unlimited. Setting
10491 * it to the rwnd (which the default stack does
10492 * and older racks) is not really a good idea
10493 * since we want to be in SS and grow both the
10494 * cwnd and the rwnd (via dynamic rwnd growth). If
10495 * we set it to the rwnd then as the peer grows its
10496 * rwnd we will be stuck in CA and never hit SS.
10498 * It's far better to raise it up high (this takes the
10499 * risk that there has been a loss already, probably
10500 * we should have an indicator in all stacks of loss
10501 * but we don't), but considering the normal use this
10502 * is a risk worth taking. The consequences of not
10503 * hitting SS are far worse than going one more time
10504 * into it early on (before we have sent even an IW).
10505 * It is highly unlikely that we will have had a loss
10506 * before getting the IW out.
10508 tp->snd_ssthresh = 0xffffffff;
10510 rack_stop_all_timers(tp);
10511 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), 0, 0, 0);
10512 rack_log_rtt_shrinks(rack, us_cts, 0,
10513 __LINE__, RACK_RTTS_INIT);
10518 rack_handoff_ok(struct tcpcb *tp)
10520 if ((tp->t_state == TCPS_CLOSED) ||
10521 (tp->t_state == TCPS_LISTEN)) {
10522 /* Sure no problem though it may not stick */
10525 if ((tp->t_state == TCPS_SYN_SENT) ||
10526 (tp->t_state == TCPS_SYN_RECEIVED)) {
10528 * We really don't know if you support sack;
10529 * you have to get to ESTAB or beyond to tell.
10533 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) {
10535 * Rack will only send a FIN after all data is acknowledged.
10536 * So in this case we have more data outstanding. We can't
10537 * switch stacks until either all data and only the FIN
10538 * is left (in which case rack_init() now knows how
10539 * to deal with that) <or> all is acknowledged and we
10540 * are only left with incoming data, though why you
10541 * would want to switch to rack after all data is acknowledged
10542 * I have no idea (rrs)!
10546 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){
10550 * If we reach here we don't do SACK on this connection so we can
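/*
 * Detach the rack stack from a connection: release any shared cwnd
 * slot, clear the mbuf-queueing inp flags, free every rack_sendmap
 * (both the RB tree and the free list) along with the rack pcb itself,
 * and leave snd_nxt at snd_max for whatever stack takes over.
 */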
10557 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
10559 if (tp->t_fb_ptr) {
10560 struct tcp_rack *rack;
10561 struct rack_sendmap *rsm, *nrsm, *rm;
10563 rack = (struct tcp_rack *)tp->t_fb_ptr;
10564 #ifdef NETFLIX_SHARED_CWND
10565 if (rack->r_ctl.rc_scw) {
10568 if (rack->r_limit_scw)
10569 limit = max(1, rack->r_ctl.rc_lowest_us_rtt);
10572 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw,
10573 rack->r_ctl.rc_scw_index,
10575 rack->r_ctl.rc_scw = NULL;
10578 /* rack does not use force data but other stacks may clear it */
10579 tp->t_flags &= ~TF_FORCEDATA;
10581 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
10582 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
10583 tp->t_inpcb->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
10585 #ifdef TCP_BLACKBOX
10586 tcp_log_flowend(tp);
10588 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) {
10589 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
10592 panic("At fini, rack:%p rsm:%p rm:%p",
10596 uma_zfree(rack_zone, rsm);
10598 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
10600 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
10601 uma_zfree(rack_zone, rsm);
10602 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
10604 rack->rc_free_cnt = 0;
10605 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
10606 tp->t_fb_ptr = NULL;
10608 /* Cancel the GP measurement in progress */
10609 tp->t_flags &= ~TF_GPUTINPROG;
10610 /* Make sure snd_nxt is correctly set */
10611 tp->snd_nxt = tp->snd_max;
10615 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack)
10617 switch (tp->t_state) {
10618 case TCPS_SYN_SENT:
10619 rack->r_state = TCPS_SYN_SENT;
10620 rack->r_substate = rack_do_syn_sent;
10622 case TCPS_SYN_RECEIVED:
10623 rack->r_state = TCPS_SYN_RECEIVED;
10624 rack->r_substate = rack_do_syn_recv;
10626 case TCPS_ESTABLISHED:
10627 rack_set_pace_segments(tp, rack, __LINE__);
10628 rack->r_state = TCPS_ESTABLISHED;
10629 rack->r_substate = rack_do_established;
10631 case TCPS_CLOSE_WAIT:
10632 rack->r_state = TCPS_CLOSE_WAIT;
10633 rack->r_substate = rack_do_close_wait;
10635 case TCPS_FIN_WAIT_1:
10636 rack->r_state = TCPS_FIN_WAIT_1;
10637 rack->r_substate = rack_do_fin_wait_1;
10640 rack->r_state = TCPS_CLOSING;
10641 rack->r_substate = rack_do_closing;
10643 case TCPS_LAST_ACK:
10644 rack->r_state = TCPS_LAST_ACK;
10645 rack->r_substate = rack_do_lastack;
10647 case TCPS_FIN_WAIT_2:
10648 rack->r_state = TCPS_FIN_WAIT_2;
10649 rack->r_substate = rack_do_fin_wait_2;
10653 case TCPS_TIME_WAIT:
10660 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
10663 * We received an ack, and then did not
10664 * call send or were bounced out due to the
10665 * hpts was running. Now a timer is up as well, is
10666 * it the right timer?
10668 struct rack_sendmap *rsm;
10671 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
10672 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT))
10674 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
10675 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) &&
10676 (tmr_up == PACE_TMR_RXT)) {
10677 /* Should be an RXT */
10681 /* Nothing outstanding? */
10682 if (tp->t_flags & TF_DELACK) {
10683 if (tmr_up == PACE_TMR_DELACK)
10684 /* We are supposed to have delayed ack up and we do */
10686 } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) {
10688 * if we hit ENOBUFS then we would expect the possibility
10689 * of nothing outstanding and the RXT up (and the hptsi timer).
10692 } else if (((V_tcp_always_keepalive ||
10693 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
10694 (tp->t_state <= TCPS_CLOSING)) &&
10695 (tmr_up == PACE_TMR_KEEP) &&
10696 (tp->snd_max == tp->snd_una)) {
10697 /* We should have keep alive up and we do */
10701 if (SEQ_GT(tp->snd_max, tp->snd_una) &&
10702 ((tmr_up == PACE_TMR_TLP) ||
10703 (tmr_up == PACE_TMR_RACK) ||
10704 (tmr_up == PACE_TMR_RXT))) {
10706 * Either a Rack, TLP or RXT is fine if we
10707 * have outstanding data.
10710 } else if (tmr_up == PACE_TMR_DELACK) {
10712 * If the delayed ack was going to go off
10713 * before the rtx/tlp/rack timer were going to
10714 * expire, then that would be the timer in control.
10715 * Note we don't check the time here trusting the
10721 * Ok the timer originally started is not what we want now.
10722 * We will force the hpts to be stopped if any, and restart
10723 * with the slot set to what was in the saved slot.
10725 if (rack->rc_inp->inp_in_hpts) {
10726 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
10729 us_cts = tcp_get_usecs(NULL);
10730 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
10732 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
10734 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
10736 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT);
10738 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
10739 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), 0, 0, 0);
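/*
 * Core input path for the rack stack.  Runs with the inp write-locked;
 * it parses options, does ECN processing and the RFC 7323
 * missing-timestamp check, makes the one-time SACK/rack eligibility
 * decision on the first segment seen, and then dispatches to the
 * per-state substate handler.  A return of 1 from that handler means
 * the tcb was unlocked and is probably gone; 0 means it is still
 * locked and output/timer work is finished up here.
 */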
10743 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
10744 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos,
10745 int32_t nxt_pkt, struct timeval *tv)
10747 int32_t thflags, retval, did_out = 0;
10748 int32_t way_out = 0;
10751 struct timespec ts;
10753 struct tcp_rack *rack;
10754 struct rack_sendmap *rsm;
10755 int32_t prev_state = 0;
10758 * tv passed from common code is from either M_TSTMP_LRO or
10759 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. The
10760 * rack_pacing stack assumes tv always refers to 'now', so we overwrite
10761 * tv here to guarantee that.
10763 if (m->m_flags & M_TSTMP_LRO)
10766 cts = tcp_tv_to_mssectick(tv);
10767 rack = (struct tcp_rack *)tp->t_fb_ptr;
10769 if ((m->m_flags & M_TSTMP) ||
10770 (m->m_flags & M_TSTMP_LRO)) {
10771 mbuf_tstmp2timespec(m, &ts);
10772 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
10773 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
10775 rack->r_ctl.act_rcv_time = *tv;
10776 kern_prefetch(rack, &prev_state);
10778 thflags = th->th_flags;
10780 NET_EPOCH_ASSERT();
10781 INP_WLOCK_ASSERT(tp->t_inpcb);
10782 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
10784 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
10786 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
10787 union tcp_log_stackspecific log;
10788 struct timeval ltv;
10789 #ifdef NETFLIX_HTTP_LOGGING
10790 struct http_sendfile_track *http_req;
10792 if (SEQ_GT(th->th_ack, tp->snd_una)) {
10793 http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1));
10795 http_req = tcp_http_find_req_for_seq(tp, th->th_ack);
10798 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
10799 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
10800 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
10801 if (rack->rack_no_prr == 0)
10802 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
10804 log.u_bbr.flex1 = 0;
10805 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
10806 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
10807 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
10808 log.u_bbr.flex3 = m->m_flags;
10809 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
10810 if (m->m_flags & M_TSTMP) {
10811 /* Record the hardware timestamp if present */
10812 mbuf_tstmp2timespec(m, &ts);
10813 ltv.tv_sec = ts.tv_sec;
10814 ltv.tv_usec = ts.tv_nsec / 1000;
10815 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v);
10816 } else if (m->m_flags & M_TSTMP_LRO) {
10817 /* Record the LRO arrival timestamp */
10818 mbuf_tstmp2timespec(m, &ts);
10819 ltv.tv_sec = ts.tv_sec;
10820 ltv.tv_usec = ts.tv_nsec / 1000;
10821 log.u_bbr.flex5 = tcp_tv_to_usectick(<v);
10823 log.u_bbr.timeStamp = tcp_get_usecs(<v);
10824 /* Log the rcv time */
10825 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
10826 #ifdef NETFLIX_HTTP_LOGGING
10827 log.u_bbr.applimited = tp->t_http_closed;
10828 log.u_bbr.applimited <<= 8;
10829 log.u_bbr.applimited |= tp->t_http_open;
10830 log.u_bbr.applimited <<= 8;
10831 log.u_bbr.applimited |= tp->t_http_req;
10833 /* Copy out any client req info */
10835 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
10837 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
10838 log.u_bbr.rttProp = http_req->timestamp;
10839 log.u_bbr.cur_del_rate = http_req->start;
10840 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
10841 log.u_bbr.flex8 |= 1;
10843 log.u_bbr.flex8 |= 2;
10844 log.u_bbr.bw_inuse = http_req->end;
10846 log.u_bbr.flex6 = http_req->start_seq;
10847 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
10848 log.u_bbr.flex8 |= 4;
10849 log.u_bbr.epoch = http_req->end_seq;
10853 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
10854 tlen, &log, true, <v);
10856 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
10859 goto done_with_input;
10862 * If a segment with the ACK-bit set arrives in the SYN-SENT state
10863 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
10865 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
10866 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
10867 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
10868 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10873 * Parse options on any incoming segment.
10875 tcp_dooptions(&to, (u_char *)(th + 1),
10876 (th->th_off << 2) - sizeof(struct tcphdr),
10877 (thflags & TH_SYN) ? TO_SYN : 0);
10880 * If timestamps were negotiated during SYN/ACK and a
10881 * segment without a timestamp is received, silently drop
10882 * the segment, unless it is a RST segment or missing timestamps are tolerated.
10884 * See section 3.2 of RFC 7323.
10886 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) &&
10887 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) {
10890 goto done_with_input;
10894 * Segment received on connection. Reset idle time and keep-alive
10895 * timer. XXX: This should be done after segment validation to
10896 * ignore broken/spoofed segs.
10898 if (tp->t_idle_reduce &&
10899 (tp->snd_max == tp->snd_una) &&
10900 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
10901 counter_u64_add(rack_input_idle_reduces, 1);
10902 rack_cc_after_idle(rack, tp);
10904 tp->t_rcvtime = ticks;
10906 * Unscale the window into a 32-bit value. For the SYN_SENT state
10907 * the scale is zero.
10909 tiwin = th->th_win << tp->snd_scale;
10911 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
10913 if (tiwin > rack->r_ctl.rc_high_rwnd)
10914 rack->r_ctl.rc_high_rwnd = tiwin;
10916 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move
10917 * this to occur after we've validated the segment.
10919 if (tp->t_flags2 & TF2_ECN_PERMIT) {
10920 if (thflags & TH_CWR) {
10921 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
10922 tp->t_flags |= TF_ACKNOW;
10924 switch (iptos & IPTOS_ECN_MASK) {
10926 tp->t_flags2 |= TF2_ECN_SND_ECE;
10927 KMOD_TCPSTAT_INC(tcps_ecn_ce);
10929 case IPTOS_ECN_ECT0:
10930 KMOD_TCPSTAT_INC(tcps_ecn_ect0);
10932 case IPTOS_ECN_ECT1:
10933 KMOD_TCPSTAT_INC(tcps_ecn_ect1);
10937 /* Process a packet differently from RFC3168. */
10938 cc_ecnpkt_handler(tp, th, iptos);
10940 /* Congestion experienced. */
10941 if (thflags & TH_ECE) {
10942 rack_cong_signal(tp, th, CC_ECN);
10947 * If echoed timestamp is later than the current time, fall back to
10948 * non RFC1323 RTT calculation. Normalize timestamp if syncookies
10949 * were used when this connection was established.
10951 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
10952 to.to_tsecr -= tp->ts_offset;
10953 if (TSTMP_GT(to.to_tsecr, cts))
10958 * If it's the first time in we need to take care of options and
10959 * verify we can do SACK for rack!
10961 if (rack->r_state == 0) {
10962 /* Should be init'd by rack_init() */
10963 KASSERT(rack->rc_inp != NULL,
10964 ("%s: rack->rc_inp unexpectedly NULL", __func__));
10965 if (rack->rc_inp == NULL) {
10966 rack->rc_inp = tp->t_inpcb;
10970 * Process options only when we get SYN/ACK back. The SYN
10971 * case for incoming connections is handled in tcp_syncache.
10972 * According to RFC1323 the window field in a SYN (i.e., a
10973 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX
10974 * this is traditional behavior, may need to be cleaned up.
10976 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
10977 /* Handle parallel SYN for ECN */
10978 if (!(thflags & TH_ACK) &&
10979 ((thflags & (TH_CWR | TH_ECE)) == (TH_CWR | TH_ECE)) &&
10980 ((V_tcp_do_ecn == 1) || (V_tcp_do_ecn == 2))) {
10981 tp->t_flags2 |= TF2_ECN_PERMIT;
10982 tp->t_flags2 |= TF2_ECN_SND_ECE;
10983 TCPSTAT_INC(tcps_ecn_shs);
10985 if ((to.to_flags & TOF_SCALE) &&
10986 (tp->t_flags & TF_REQ_SCALE)) {
10987 tp->t_flags |= TF_RCVD_SCALE;
10988 tp->snd_scale = to.to_wscale;
10990 tp->t_flags &= ~TF_REQ_SCALE;
10992 * Initial send window. It will be updated with the
10993 * next incoming segment to the scaled value.
10995 tp->snd_wnd = th->th_win;
10996 if ((to.to_flags & TOF_TS) &&
10997 (tp->t_flags & TF_REQ_TSTMP)) {
10998 tp->t_flags |= TF_RCVD_TSTMP;
10999 tp->ts_recent = to.to_tsval;
11000 tp->ts_recent_age = cts;
11002 tp->t_flags &= ~TF_REQ_TSTMP;
11003 if (to.to_flags & TOF_MSS)
11004 tcp_mss(tp, to.to_mss);
11005 if ((tp->t_flags & TF_SACK_PERMIT) &&
11006 (to.to_flags & TOF_SACKPERM) == 0)
11007 tp->t_flags &= ~TF_SACK_PERMIT;
11008 if (IS_FASTOPEN(tp->t_flags)) {
11009 if (to.to_flags & TOF_FASTOPEN) {
11012 if (to.to_flags & TOF_MSS)
11015 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
11019 tcp_fastopen_update_cache(tp, mss,
11020 to.to_tfo_len, to.to_tfo_cookie);
11022 tcp_fastopen_disable_path(tp);
11026 * At this point we are at the initial call. Here we decide
11027 * if we are doing RACK or not. We do this by seeing if
11028 * TF_SACK_PERMIT is set and the sack-not-required is clear.
11029 * The code now does do dup-ack counting so if you don't
11030 * switch back you won't get rack & TLP, but you will still
11034 if ((rack_sack_not_required == 0) &&
11035 ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
11036 tcp_switch_back_to_default(tp);
11037 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen,
11042 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
11043 tcp_set_hpts(tp->t_inpcb);
11044 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack);
11046 if (thflags & TH_FIN)
11047 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN);
11048 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
11049 if ((rack->rc_gp_dyn_mul) &&
11050 (rack->use_fixed_rate == 0) &&
11051 (rack->rc_always_pace)) {
11052 /* Check in on probertt */
11053 rack_check_probe_rtt(rack, us_cts);
11055 if (rack->forced_ack) {
11059 * A persist or keep-alive was forced out, update our
11060 * min rtt time. Note we do not worry about lost
11061 * retransmissions since KEEP-ALIVES and persists
11062 * are usually sent at long intervals (though
11063 * if we were really paranoid or worried we could
11064 * at least use timestamps if available to validate).
11066 rack->forced_ack = 0;
11067 us_rtt = us_cts - rack->r_ctl.forced_ack_ts;
11070 rack_log_rtt_upd(tp, rack, us_rtt, 0, NULL, 3);
11071 rack_apply_updated_usrtt(rack, us_rtt, us_cts);
11074 * This is the one exception case where we set the rack state
11075 * always. All other times (timers etc) we must have a rack-state
11076 * set (so we assure we have done the checks above for SACK).
11078 rack->r_ctl.rc_rcvtime = cts;
11079 if (rack->r_state != tp->t_state)
11080 rack_set_state(tp, rack);
11081 if (SEQ_GT(th->th_ack, tp->snd_una) &&
11082 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL)
11083 kern_prefetch(rsm, &prev_state);
11084 prev_state = rack->r_state;
11085 rack_clear_rate_sample(rack);
11086 retval = (*rack->r_substate) (m, th, so,
11087 tp, &to, drop_hdrlen,
11088 tlen, tiwin, thflags, nxt_pkt, iptos);
11090 if ((retval == 0) &&
11091 (tp->t_inpcb == NULL)) {
11092 panic("retval:%d tp:%p t_inpcb:NULL state:%d",
11093 retval, tp, prev_state);
11098 * If retval is 1 the tcb is unlocked and most likely the tp
11101 INP_WLOCK_ASSERT(tp->t_inpcb);
11102 if ((rack->rc_gp_dyn_mul) &&
11103 (rack->rc_always_pace) &&
11104 (rack->use_fixed_rate == 0) &&
11105 rack->in_probe_rtt &&
11106 (rack->r_ctl.rc_time_probertt_starts == 0)) {
11108 * If we are going for target, let's recheck before
11111 rack_check_probe_rtt(rack, us_cts);
11113 if (rack->set_pacing_done_a_iw == 0) {
11114 /* How much has been acked? */
11115 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) {
11116 /* We have enough to set in the pacing segment size */
11117 rack->set_pacing_done_a_iw = 1;
11118 rack_set_pace_segments(tp, rack, __LINE__);
11121 tcp_rack_xmit_timer_commit(rack, tp);
11122 if (nxt_pkt == 0) {
11123 if (rack->r_wanted_output != 0) {
11126 (void)tp->t_fb->tfb_tcp_output(tp);
11128 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
11130 if ((nxt_pkt == 0) &&
11131 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
11132 (SEQ_GT(tp->snd_max, tp->snd_una) ||
11133 (tp->t_flags & TF_DELACK) ||
11134 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
11135 (tp->t_state <= TCPS_CLOSING)))) {
11136 /* We could not send (probably in the hpts but stopped the timer earlier)? */
11137 if ((tp->snd_max == tp->snd_una) &&
11138 ((tp->t_flags & TF_DELACK) == 0) &&
11139 (rack->rc_inp->inp_in_hpts) &&
11140 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
11141 /* keep-alive not needed while we still have hptsi output pending */
11145 if (rack->rc_inp->inp_in_hpts) {
11146 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
11147 us_cts = tcp_get_usecs(NULL);
11148 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
11150 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
11153 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
11155 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT);
11157 if (late && (did_out == 0)) {
11159 * We are late in the sending
11160 * and we did not call the output
11161 * (this probably should not happen).
11163 goto do_output_now;
11165 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), 0, 0, 0);
11168 } else if (nxt_pkt == 0) {
11169 /* Do we have the correct timer running? */
11170 rack_timer_audit(tp, rack, &so->so_snd);
11174 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out);
11176 rack->r_wanted_output = 0;
11178 if (tp->t_inpcb == NULL) {
11179 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d",
11181 retval, tp, prev_state);
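/*
 * Wrapper around rack_do_segment_nounlock(): flush any packets already
 * queued on the tcpcb first, derive the arrival time from the LRO mbuf
 * timestamp when present (falling back to tcp_get_usecs()), and unlock
 * the inp afterwards if the connection is still around.
 */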
11189 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
11190 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos)
11194 /* First let's see if we have old packets */
11195 if (tp->t_in_pkt) {
11196 if (ctf_do_queued_segments(so, tp, 1)) {
11201 if (m->m_flags & M_TSTMP_LRO) {
11202 tv.tv_sec = m->m_pkthdr.rcv_tstmp /1000000000;
11203 tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000)/1000;
11205 /* Should not happen; should we kassert instead? */
11206 tcp_get_usecs(&tv);
11208 if(rack_do_segment_nounlock(m, th, so, tp,
11209 drop_hdrlen, tlen, iptos, 0, &tv) == 0) {
11210 tcp_handle_wakeup(tp, so);
11211 INP_WUNLOCK(tp->t_inpcb);
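/*
 * Pick the next candidate for (re)transmission: the oldest unacked
 * rack_sendmap entry that has either reached the dup-ack threshold or
 * been passed by a SACK, and whose last transmission is older than the
 * RACK reordering threshold computed for the current RTT.
 */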
11215 struct rack_sendmap *
11216 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused)
11218 struct rack_sendmap *rsm = NULL;
11220 uint32_t srtt = 0, thresh = 0, ts_low = 0;
11222 /* Return the next guy to be re-transmitted */
11223 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) {
11226 if (tp->t_flags & TF_SENTFIN) {
11227 /* retran the end FIN? */
11230 /* ok let's look at this one */
11231 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
11232 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) {
11235 rsm = rack_find_lowest_rsm(rack);
11240 if (rsm->r_flags & RACK_ACKED) {
11243 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) &&
11244 (rsm->r_dupack < DUP_ACK_THRESHOLD)) {
11245 /* It's not yet ready */
11248 srtt = rack_grab_rtt(tp, rack);
11249 idx = rsm->r_rtr_cnt - 1;
11250 ts_low = rsm->r_tim_lastsent[idx];
11251 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
11252 if ((tsused == ts_low) ||
11253 (TSTMP_LT(tsused, ts_low))) {
11254 /* No time since sending */
11257 if ((tsused - ts_low) < thresh) {
11258 /* It has not been long enough yet */
11261 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
11262 ((rsm->r_flags & RACK_SACK_PASSED) &&
11263 (rack->sack_attack_disable == 0))) {
11265 * We have passed the dup-ack threshold <or>
11266 * a SACK has indicated this is missing.
11267 * Note that if you are a declared attacker
11268 * it is only the dup-ack threshold that
11269 * will cause retransmits.
11271 /* log retransmit reason */
11272 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1);
11279 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
11280 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method,
11281 int line, struct rack_sendmap *rsm)
11283 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
11284 union tcp_log_stackspecific log;
11287 memset(&log, 0, sizeof(log));
11288 log.u_bbr.flex1 = slot;
11289 log.u_bbr.flex2 = len;
11290 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs;
11291 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs;
11292 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss;
11293 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca;
11294 log.u_bbr.use_lt_bw = rack->app_limited_needs_set;
11295 log.u_bbr.use_lt_bw <<= 1;
11296 log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
11297 log.u_bbr.use_lt_bw <<= 1;
11298 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
11299 log.u_bbr.use_lt_bw <<= 1;
11300 log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
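/*
 * use_lt_bw now carries four flag bits, most- to least-significant:
 * app_limited_needs_set, rc_gp_filled, measure_saw_probe_rtt and
 * in_probe_rtt.
 */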
11301 log.u_bbr.pkt_epoch = line;
11302 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec;
11303 log.u_bbr.bw_inuse = bw_est;
11304 log.u_bbr.delRate = bw;
11305 if (rack->r_ctl.gp_bw == 0)
11306 log.u_bbr.cur_del_rate = 0;
11308 log.u_bbr.cur_del_rate = rack_get_bw(rack);
11309 log.u_bbr.rttProp = len_time;
11310 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt;
11311 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit;
11312 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
11313 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) {
11314 /* We are in slow start */
11315 log.u_bbr.flex7 = 1;
11317 /* we are on congestion avoidance */
11318 log.u_bbr.flex7 = 0;
11320 log.u_bbr.flex8 = method;
11321 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
11322 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
11323 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec;
11324 log.u_bbr.cwnd_gain <<= 1;
11325 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
11326 log.u_bbr.cwnd_gain <<= 1;
11327 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
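/*
 * cwnd_gain likewise packs rc_gp_saw_rec, rc_gp_saw_ss and
 * rc_gp_saw_ca, most- to least-significant bit.
 */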
11328 TCP_LOG_EVENTP(rack->rc_tp, NULL,
11329 &rack->rc_inp->inp_socket->so_rcv,
11330 &rack->rc_inp->inp_socket->so_snd,
11331 BBR_LOG_HPTSI_CALC, 0,
11332 0, &log, false, &tv);
11337 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss)
11339 uint32_t new_tso, user_max;
11341 user_max = rack->rc_user_set_max_segs * mss;
11342 if (rack->rc_force_max_seg) {
11345 if (rack->use_fixed_rate &&
11346 ((rack->r_ctl.crte == NULL) ||
11347 (bw != rack->r_ctl.crte->rate))) {
11348 /* Use the user mss since we are not exactly matched */
11351 new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL);
11352 if (new_tso > user_max)
11353 new_tso = user_max;
11358 rack_log_hdwr_pacing(struct tcp_rack *rack, const struct ifnet *ifp,
11359 uint64_t rate, uint64_t hw_rate, int line,
11362 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
11363 union tcp_log_stackspecific log;
11366 memset(&log, 0, sizeof(log));
11367 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff);
11368 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff);
11369 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff);
11370 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff);
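/*
 * hw_rate and the ifp pointer are 64 bits wide, so each is split
 * across two 32-bit flex fields above (high word, then low word).
 */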
11371 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
11372 log.u_bbr.bw_inuse = rate;
11373 log.u_bbr.flex5 = line;
11374 log.u_bbr.flex6 = error;
11375 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs;
11376 log.u_bbr.flex8 = rack->use_fixed_rate;
11377 log.u_bbr.flex8 <<= 1;
11378 log.u_bbr.flex8 |= rack->rack_hdrw_pacing;
11379 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
11380 TCP_LOG_EVENTP(rack->rc_tp, NULL,
11381 &rack->rc_inp->inp_socket->so_rcv,
11382 &rack->rc_inp->inp_socket->so_snd,
11383 BBR_LOG_HDWR_PACE, 0,
11384 0, &log, false, &tv);
11389 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz)
11391 uint64_t lentim, fill_bw;
11393 /* Let's first see if we are full; if so, continue with the normal rate */
11394 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use)
11396 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd)
11398 if (rack->r_ctl.rc_last_us_rtt == 0)
11400 if (rack->rc_pace_fill_if_rttin_range &&
11401 (rack->r_ctl.rc_last_us_rtt >=
11402 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) {
11403 /* The RTT is huge (N * smallest); let's not fill */
11407 * First let's calculate the b/w based on the last us-rtt
11410 fill_bw = rack->r_ctl.cwnd_to_use;
11411 /* Take the rwnd if it's smaller */
11412 if (fill_bw > rack->rc_tp->snd_wnd)
11413 fill_bw = rack->rc_tp->snd_wnd;
11414 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC;
11415 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt;
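/*
 * Illustrative numbers only: a 100000 byte window with a 10000 us
 * last RTT gives fill_bw = 100000 * 1000000 / 10000 = 10,000,000
 * bytes per second.
 */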
11416 /* We are below the min b/w */
11417 if (fill_bw < RACK_MIN_BW)
11420 * OK, fill_bw holds our mythical b/w to fill the cwnd
11421 * in an rtt; what does that equate to time-wise?
11423 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC;
11425 if (lentim < slot) {
11426 rack_log_pacing_delay_calc(rack, len, slot, fill_bw,
11427 0, lentim, 12, __LINE__, NULL);
11428 return ((int32_t)lentim);
11434 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz)
11436 struct rack_sendmap *lrsm;
11440 if (rack->rc_always_pace == 0) {
11442 * We use the most optimistic possible cwnd/srtt for
11443 * sending calculations. This will make our
11444 * calculation anticipate getting more through
11445 * quicker than possible. But that's OK; we don't want
11446 * the peer to have a gap in data sending.
11448 uint32_t srtt, cwnd, tr_perms = 0;
11449 int32_t reduce = 0;
11453 * We keep no precise pacing with the old method
11454 * instead we use the pacer to mitigate bursts.
11456 rack->r_ctl.rc_agg_delayed = 0;
11459 rack->r_ctl.rc_agg_early = 0;
11460 if (rack->r_ctl.rc_rack_min_rtt)
11461 srtt = rack->r_ctl.rc_rack_min_rtt;
11463 srtt = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT));
11464 if (rack->r_ctl.rc_rack_largest_cwnd)
11465 cwnd = rack->r_ctl.rc_rack_largest_cwnd;
11467 cwnd = rack->r_ctl.cwnd_to_use;
11468 tr_perms = cwnd / srtt;
11469 if (tr_perms == 0) {
11470 tr_perms = ctf_fixed_maxseg(tp);
11473 * Calculate how long this will take to drain. If
11474 * the calculation comes out to zero, that's OK; we
11475 * will use send_a_lot to possibly spin around for
11476 * more, increasing tot_len_this_send to the point
11477 * that it is going to require a pace, or we hit the
11478 * cwnd. In that case we are just waiting for
11481 slot = len / tr_perms;
11482 /* Now do we reduce the time so we don't run dry? */
11483 if (slot && rack_slot_reduction) {
11484 reduce = (slot / rack_slot_reduction);
11485 if (reduce < slot) {
11490 slot *= HPTS_USEC_IN_MSEC;
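/*
 * Illustrative numbers only: with cwnd = 50000 bytes and srtt = 10 ms,
 * tr_perms is 5000 bytes per ms, so a 15000 byte send drains in about
 * 3 ms, i.e. a 3000 us slot (less any rack_slot_reduction trim above).
 */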
11493 * We always consider ourselves app limited with old-style
11494 * sends that are not retransmits. This could be the initial
11495 * measurement, but that's OK; it is all set up and specially
11496 * handled. If another send leaks out, then that too will
11497 * be marked app-limited.
11499 lrsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
11500 if (lrsm && ((lrsm->r_flags & RACK_APP_LIMITED) == 0)) {
11501 rack->r_ctl.rc_first_appl = lrsm;
11502 lrsm->r_flags |= RACK_APP_LIMITED;
11503 rack->r_ctl.rc_app_limited_cnt++;
11506 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL);
11508 uint64_t bw_est, res, lentim, rate_wanted;
11509 uint32_t orig_val, srtt, segs, oh;
11511 if ((rack->r_rr_config == 1) && rsm) {
11512 return (rack->r_ctl.rc_min_to * HPTS_USEC_IN_MSEC);
11514 if (rack->use_fixed_rate) {
11515 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack);
11516 } else if ((rack->r_ctl.init_rate == 0) &&
11517 #ifdef NETFLIX_PEAKRATE
11518 (rack->rc_tp->t_maxpeakrate == 0) &&
11520 (rack->r_ctl.gp_bw == 0)) {
11521 /* no way yet to do an estimate */
11522 bw_est = rate_wanted = 0;
11524 bw_est = rack_get_bw(rack);
11525 rate_wanted = rack_get_output_bw(rack, bw_est, rsm);
11527 if ((bw_est == 0) || (rate_wanted == 0)) {
11529 * No way yet to make a b/w estimate, or
11530 * our rate is set incorrectly.
11534 /* We need to account for all the overheads */
11535 segs = (len + segsiz - 1) / segsiz;
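/* segs is the number of segments this send will occupy, rounded up. */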
11537 * We need the diff between 1514 bytes (e-mtu with e-hdr)
11538 * and how much data we put in each packet. Yes this
11539 * means we may be off if we are larger than 1500 bytes
11540 * or smaller. But this just makes us more conservative.
11542 if (ETHERNET_SEGMENT_SIZE > segsiz)
11543 oh = ETHERNET_SEGMENT_SIZE - segsiz;
11547 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC;
11548 res = lentim / rate_wanted;
11549 slot = (uint32_t)res;
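/*
 * Illustrative numbers only: sending 14480 bytes at a wanted rate of
 * 10,000,000 bytes/sec works out to roughly 14480 * 1,000,000 /
 * 10,000,000 ~= 1448 us of pacing delay; the per-segment header
 * overhead folded into lentim above nudges this up slightly.
 */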
11550 orig_val = rack->r_ctl.rc_pace_max_segs;
11551 rack_set_pace_segments(rack->rc_tp, rack, __LINE__);
11552 /* Did we change the TSO size, if so log it */
11553 if (rack->r_ctl.rc_pace_max_segs != orig_val)
11554 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL);
11555 if ((rack->rc_pace_to_cwnd) &&
11556 (rack->in_probe_rtt == 0) &&
11557 (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) {
11559 * We want to pace at our rate *or* faster to
11560 * fill the cwnd to the max if it's not full.
11562 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz);
11564 if ((rack->rc_inp->inp_route.ro_nh != NULL) &&
11565 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
11566 if ((rack->rack_hdw_pace_ena) &&
11567 (rack->rack_hdrw_pacing == 0) &&
11568 (rack->rack_attempt_hdwr_pace == 0)) {
11570 * Let's attempt to turn on hardware pacing
11573 rack->rack_attempt_hdwr_pace = 1;
11574 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp,
11575 rack->rc_inp->inp_route.ro_nh->nh_ifp,
11579 if (rack->r_ctl.crte) {
11580 rack->rack_hdrw_pacing = 1;
11581 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(rack->rc_tp, rate_wanted, segsiz,
11582 0, rack->r_ctl.crte,
11584 rack_log_hdwr_pacing(rack, rack->rc_inp->inp_route.ro_nh->nh_ifp,
11585 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
11588 } else if (rack->rack_hdrw_pacing &&
11589 (rack->r_ctl.crte->rate != rate_wanted)) {
11590 /* Do we need to adjust our rate? */
11591 const struct tcp_hwrate_limit_table *nrte;
11593 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte,
11595 rack->rc_inp->inp_route.ro_nh->nh_ifp,
11599 if (nrte == NULL) {
11600 /* Lost the rate */
11601 rack->rack_hdrw_pacing = 0;
11602 rack_set_pace_segments(rack->rc_tp, rack, __LINE__);
11603 } else if (nrte != rack->r_ctl.crte) {
11604 rack->r_ctl.crte = nrte;
11605 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(rack->rc_tp, rate_wanted,
11609 rack_log_hdwr_pacing(rack, rack->rc_inp->inp_route.ro_nh->nh_ifp,
11610 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
11615 if (rack_limit_time_with_srtt &&
11616 (rack->use_fixed_rate == 0) &&
11617 #ifdef NETFLIX_PEAKRATE
11618 (rack->rc_tp->t_maxpeakrate == 0) &&
11620 (rack->rack_hdrw_pacing == 0)) {
11622 * Sanity check: we do not allow the pacing delay
11623 * to be longer than the SRTT of the path. If it is
11624 * a slow path, then adding a packet should increase
11625 * the RTT and compensate for this, i.e. the srtt will
11626 * be greater so the allowed pacing time will be greater.
11628 * Note this restriction does not apply when a peak rate
11629 * is set, we are doing fixed pacing, or hardware pacing.
11631 if (rack->rc_tp->t_srtt)
11632 srtt = (TICKS_2_USEC(rack->rc_tp->t_srtt) >> TCP_RTT_SHIFT);
11634 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* it's in ms, convert */
11636 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL);
11640 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm);
11643 counter_u64_add(rack_calc_nonzero, 1);
11645 counter_u64_add(rack_calc_zero, 1);
11650 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack,
11651 tcp_seq startseq, uint32_t sb_offset)
11653 struct rack_sendmap *my_rsm = NULL;
11654 struct rack_sendmap fe;
11656 if (tp->t_state < TCPS_ESTABLISHED) {
11658 * We don't start any measurements if we are
11659 * not at least established.
11663 tp->t_flags |= TF_GPUTINPROG;
11664 rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
11665 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
11666 tp->gput_seq = startseq;
11667 rack->app_limited_needs_set = 0;
11668 if (rack->in_probe_rtt)
11669 rack->measure_saw_probe_rtt = 1;
11670 else if ((rack->measure_saw_probe_rtt) &&
11671 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
11672 rack->measure_saw_probe_rtt = 0;
11673 if (rack->rc_gp_filled)
11674 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
11676 /* Special case initial measurement */
11677 rack->r_ctl.rc_gp_output_ts = tp->gput_ts = tcp_get_usecs(NULL);
11680 * We take a guess out into the future,
11681 * if we have no measurement and no
11682 * initial rate, we measure the first
11683 * initial-window's worth of data to
11684 * speed up getting some GP measurement and
11685 * thus start pacing.
11687 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) {
11688 rack->app_limited_needs_set = 1;
11689 tp->gput_ack = startseq + max(rc_init_window(rack),
11690 (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
11691 rack_log_pacing_delay_calc(rack,
11696 rack->r_ctl.rc_app_limited_cnt,
11703 * We are out somewhere in the sb;
11704 * can we use the already-outstanding data?
11707 if (rack->r_ctl.rc_app_limited_cnt == 0) {
11709 * Yes first one is good and in this case
11710 * the tp->gput_ts is correctly set based on
11711 * the last ack that arrived (no need to
11712 * set things up when an ack comes in).
11714 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
11715 if ((my_rsm == NULL) ||
11716 (my_rsm->r_rtr_cnt != 1)) {
11717 /* retransmission? */
11721 if (rack->r_ctl.rc_first_appl == NULL) {
11723 * If rc_first_appl is NULL
11724 * then the cnt should be 0.
11725 * This is probably an error; maybe
11726 * a KASSERT would be appropriate.
11731 * If we have a marker pointer to the last one that is
11732 * app limited we can use that, but we need to set
11733 * things up so that when it gets ack'ed we record
11734 * the ack time (if its not already acked).
11736 rack->app_limited_needs_set = 1;
11738 * We want to get to the rsm that is either
11739 * next with space i.e. over 1 MSS or the one
11740 * after that (after the app-limited).
11742 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
11743 rack->r_ctl.rc_first_appl);
11745 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp))
11746 /* Have to use the next one */
11747 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
11750 /* Use after the first MSS of it is acked */
11751 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp);
11755 if ((my_rsm == NULL) ||
11756 (my_rsm->r_rtr_cnt != 1)) {
11758 * Either it's a retransmit or
11759 * the last is the app-limited one.
11764 tp->gput_seq = my_rsm->r_start;
11766 if (my_rsm->r_flags & RACK_ACKED) {
11768 * This one has been acked use the arrival ack time
11770 tp->gput_ts = my_rsm->r_ack_arrival;
11771 rack->app_limited_needs_set = 0;
11773 rack->r_ctl.rc_gp_output_ts = my_rsm->usec_orig_send;
11774 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
11775 rack_log_pacing_delay_calc(rack,
11780 rack->r_ctl.rc_app_limited_cnt,
11788 * We don't know how long we may have been
11789 * idle or if this is the first send. Let's
11790 * set up the flag so we will trim off
11791 * the first ack'd data so we get a true
11794 rack->app_limited_needs_set = 1;
11795 tp->gput_ack = startseq + rack_get_measure_window(tp, rack);
11796 /* Find this guy so we can pull the send time */
11797 fe.r_start = startseq;
11798 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
11800 rack->r_ctl.rc_gp_output_ts = my_rsm->usec_orig_send;
11801 if (my_rsm->r_flags & RACK_ACKED) {
11803 * Unlikely since it's probably what was
11804 * just transmitted (but I am paranoid).
11806 tp->gput_ts = my_rsm->r_ack_arrival;
11807 rack->app_limited_needs_set = 0;
11809 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) {
11810 /* This also is unlikely */
11811 tp->gput_seq = my_rsm->r_start;
11815 * TSNH unless we have some send-map limit,
11816 * and even at that it should not be hitting
11817 * that limit (we should have stopped sending).
11819 rack->r_ctl.rc_gp_output_ts = tcp_get_usecs(NULL);
11821 rack_log_pacing_delay_calc(rack,
11826 rack->r_ctl.rc_app_limited_cnt,
11827 9, __LINE__, NULL);
11830 static inline uint32_t
11831 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use,
11832 uint32_t avail, int32_t sb_offset)
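/*
 * In short: sendwin is the smaller of the cwnd in use and the peer's
 * receive window; len is what is left of sendwin after the current
 * flight, clipped so we never exceed the peer's rwnd or the data
 * actually available in the socket buffer.
 */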
11837 if (tp->snd_wnd > cwnd_to_use)
11838 sendwin = cwnd_to_use;
11840 sendwin = tp->snd_wnd;
11841 if (ctf_outstanding(tp) >= tp->snd_wnd) {
11842 /* We never want to go over our peer's rcv-window */
11847 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
11848 if (flight >= sendwin) {
11850 * We have in flight what we are allowed by cwnd (if
11851 * it was rwnd blocking it would have hit above out
11856 len = sendwin - flight;
11857 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) {
11858 /* We would send too much (beyond the rwnd) */
11859 len = tp->snd_wnd - ctf_outstanding(tp);
11861 if ((len + sb_offset) > avail) {
11863 * We don't have that much in the SB, how much is
11866 len = avail - sb_offset;
11873 rack_output(struct tcpcb *tp)
11877 uint32_t sb_offset;
11878 int32_t len, flags, error = 0;
11881 uint32_t if_hw_tsomaxsegcount = 0;
11882 uint32_t if_hw_tsomaxsegsize;
11883 int32_t segsiz, minseg;
11884 long tot_len_this_send = 0;
11885 struct ip *ip = NULL;
11887 struct ipovly *ipov = NULL;
11889 struct udphdr *udp = NULL;
11890 struct tcp_rack *rack;
11894 uint8_t wanted_cookie = 0;
11895 u_char opt[TCP_MAXOLEN];
11896 unsigned ipoptlen, optlen, hdrlen, ulen=0;
11899 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
11900 unsigned ipsec_optlen = 0;
11903 int32_t idle, sendalot;
11904 int32_t sub_from_prr = 0;
11905 volatile int32_t sack_rxmit;
11906 struct rack_sendmap *rsm = NULL;
11910 int32_t sup_rack = 0;
11911 uint32_t cts, us_cts, delayed, early;
11912 uint8_t hpts_calling, new_data_tlp = 0, doing_tlp = 0;
11913 uint32_t cwnd_to_use;
11914 int32_t do_a_prefetch;
11915 int32_t prefetch_rsm = 0;
11918 int32_t prefetch_so_done = 0;
11919 struct tcp_log_buffer *lgb = NULL;
11921 struct sockbuf *sb;
11923 struct ip6_hdr *ip6 = NULL;
11926 uint8_t filled_all = 0;
11927 bool hw_tls = false;
11929 /* setup and take the cache hits here */
11930 rack = (struct tcp_rack *)tp->t_fb_ptr;
11931 inp = rack->rc_inp;
11932 so = inp->inp_socket;
11934 kern_prefetch(sb, &do_a_prefetch);
11936 hpts_calling = inp->inp_hpts_calls;
11937 hw_tls = (so->so_snd.sb_flags & SB_TLS_IFNET) != 0;
11939 NET_EPOCH_ASSERT();
11940 INP_WLOCK_ASSERT(inp);
11942 if (tp->t_flags & TF_TOE)
11943 return (tcp_offload_output(tp));
11946 * For TFO connections in SYN_RECEIVED, only allow the initial
11947 * SYN|ACK and those sent by the retransmit timer.
11949 if (IS_FASTOPEN(tp->t_flags) &&
11950 (tp->t_state == TCPS_SYN_RECEIVED) &&
11951 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */
11952 (rack->r_ctl.rc_resend == NULL)) /* not a retransmit */
11955 if (rack->r_state) {
11956 /* Use the cache line loaded if possible */
11957 isipv6 = rack->r_is_v6;
11959 isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
11963 us_cts = tcp_get_usecs(&tv);
11964 cts = tcp_tv_to_mssectick(&tv);
11965 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
11966 inp->inp_in_hpts) {
11968 * We are on the hpts for some timer but not hptsi output.
11969 * Remove from the hpts unconditionally.
11971 rack_timer_cancel(tp, rack, cts, __LINE__);
11973 /* Are we pacing and late? */
11974 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
11975 TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) {
11976 /* We are delayed */
11977 delayed = us_cts - rack->r_ctl.rc_last_output_to;
11981 /* Do the timers, which may override the pacer */
11982 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
11983 if (rack_process_timers(tp, rack, cts, hpts_calling)) {
11984 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1);
11988 if ((rack->r_timer_override) ||
11990 (tp->t_state < TCPS_ESTABLISHED)) {
11991 if (tp->t_inpcb->inp_in_hpts)
11992 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT);
11993 } else if (tp->t_inpcb->inp_in_hpts) {
11995 * On the hpts you can't pass even if ACKNOW is on, we will
11996 * when the hpts fires.
11998 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1);
12001 inp->inp_hpts_calls = 0;
12002 /* Finish out both pacing early and late accounting */
12003 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
12004 TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
12005 early = rack->r_ctl.rc_last_output_to - us_cts;
12009 rack->r_ctl.rc_agg_delayed += delayed;
12011 } else if (early) {
12012 rack->r_ctl.rc_agg_early += early;
12015 /* Now that early/late accounting is done turn off the flag */
12016 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
12017 rack->r_wanted_output = 0;
12018 rack->r_timer_override = 0;
12020 * For TFO connections in SYN_SENT or SYN_RECEIVED,
12021 * only allow the initial SYN or SYN|ACK and those sent
12022 * by the retransmit timer.
12024 if (IS_FASTOPEN(tp->t_flags) &&
12025 ((tp->t_state == TCPS_SYN_RECEIVED) ||
12026 (tp->t_state == TCPS_SYN_SENT)) &&
12027 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */
12028 (tp->t_rxtshift == 0)) { /* not a retransmit */
12029 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
12030 goto just_return_nolock;
12033 * Determine length of data that should be transmitted, and flags
12034 * that will be used. If there is some data or critical controls
12035 * (SYN, RST) to send, then transmit; otherwise, investigate
12038 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
12039 if (tp->t_idle_reduce) {
12040 if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur))
12041 rack_cc_after_idle(rack, tp);
12043 tp->t_flags &= ~TF_LASTIDLE;
12045 if (tp->t_flags & TF_MORETOCOME) {
12046 tp->t_flags |= TF_LASTIDLE;
12050 if ((tp->snd_una == tp->snd_max) &&
12051 rack->r_ctl.rc_went_idle_time &&
12052 TSTMP_GT(us_cts, rack->r_ctl.rc_went_idle_time)) {
12053 idle = us_cts - rack->r_ctl.rc_went_idle_time;
12054 if (idle > rack_min_probertt_hold) {
12055 /* Count as a probe rtt */
12056 if (rack->in_probe_rtt == 0) {
12057 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
12058 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
12059 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
12060 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
12062 rack_exit_probertt(rack, us_cts);
12069 * If we've recently taken a timeout, snd_max will be greater than
12070 * snd_nxt. There may be SACK information that allows us to avoid
12071 * resending already delivered data. Adjust snd_nxt accordingly.
12074 us_cts = tcp_get_usecs(&tv);
12075 cts = tcp_tv_to_mssectick(&tv);
12078 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
12080 sb_offset = tp->snd_max - tp->snd_una;
12081 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
12082 #ifdef NETFLIX_SHARED_CWND
12083 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) &&
12084 rack->rack_enable_scwnd) {
12085 /* We are doing cwnd sharing */
12086 if (rack->rc_gp_filled &&
12087 (rack->rack_attempted_scwnd == 0) &&
12088 (rack->r_ctl.rc_scw == NULL) &&
12090 /* The pcbid is in, lets make an attempt */
12091 counter_u64_add(rack_try_scwnd, 1);
12092 rack->rack_attempted_scwnd = 1;
12093 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp,
12094 &rack->r_ctl.rc_scw_index,
12097 if (rack->r_ctl.rc_scw &&
12098 (rack->rack_scwnd_is_idle == 1) &&
12099 (rack->rc_in_persist == 0) &&
12101 /* we are no longer out of data */
12102 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
12103 rack->rack_scwnd_is_idle = 0;
12105 if (rack->r_ctl.rc_scw) {
12106 /* First let's update and get the cwnd */
12107 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw,
12108 rack->r_ctl.rc_scw_index,
12109 tp->snd_cwnd, tp->snd_wnd, segsiz);
12113 flags = tcp_outflags[tp->t_state];
12114 while (rack->rc_free_cnt < rack_free_cache) {
12115 rsm = rack_alloc(rack);
12117 if (inp->inp_hpts_calls)
12118 /* Retry in a ms */
12119 slot = (1 * HPTS_USEC_IN_MSEC);
12120 goto just_return_nolock;
12122 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext);
12123 rack->rc_free_cnt++;
12126 if (inp->inp_hpts_calls)
12127 inp->inp_hpts_calls = 0;
12131 if (flags & TH_RST) {
12135 if (rack->r_ctl.rc_resend) {
12136 /* Retransmit timer */
12137 rsm = rack->r_ctl.rc_resend;
12138 rack->r_ctl.rc_resend = NULL;
12139 rsm->r_flags &= ~RACK_TLP;
12140 len = rsm->r_end - rsm->r_start;
12143 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
12144 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
12145 __func__, __LINE__,
12146 rsm->r_start, tp->snd_una, tp, rack, rsm));
12147 sb_offset = rsm->r_start - tp->snd_una;
12150 } else if ((rack->rc_in_persist == 0) &&
12151 ((rsm = tcp_rack_output(tp, rack, cts)) != NULL)) {
12152 /* We have a retransmit that takes precedence */
12153 rsm->r_flags &= ~RACK_TLP;
12154 if ((!IN_RECOVERY(tp->t_flags)) &&
12155 ((tp->t_flags & (TF_WASFRECOVERY | TF_WASCRECOVERY)) == 0)) {
12156 /* Enter recovery if not induced by a time-out */
12157 rack->r_ctl.rc_rsm_start = rsm->r_start;
12158 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
12159 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
12160 rack_cong_signal(tp, NULL, CC_NDUPACK);
12162 * When we enter recovery we need to assure we send
12165 if (rack->rack_no_prr == 0) {
12166 rack->r_ctl.rc_prr_sndcnt = segsiz;
12167 rack_log_to_prr(rack, 13, 0);
12171 if (SEQ_LT(rsm->r_start, tp->snd_una)) {
12172 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
12173 tp, rack, rsm, rsm->r_start, tp->snd_una);
12176 len = rsm->r_end - rsm->r_start;
12177 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
12178 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
12179 __func__, __LINE__,
12180 rsm->r_start, tp->snd_una, tp, rack, rsm));
12181 sb_offset = rsm->r_start - tp->snd_una;
12182 /* Can we send it within the PRR boundary? */
12183 if (rack->rack_no_prr == 0) {
12184 if ((rack->use_rack_rr == 0) && (len > rack->r_ctl.rc_prr_sndcnt)) {
12185 /* It does not fit */
12186 if ((ctf_flight_size(tp, rack->r_ctl.rc_sacked) > len) &&
12187 (rack->r_ctl.rc_prr_sndcnt < segsiz)) {
12189 * prr is less than a segment, we
12190 * have more acks due in besides
12191 * what we need to resend. Let's not send,
12192 * to avoid sending small pieces of
12193 * what we need to retransmit.
12196 goto just_return_nolock;
12198 len = rack->r_ctl.rc_prr_sndcnt;
12207 KMOD_TCPSTAT_INC(tcps_sack_rexmits);
12208 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
12210 counter_u64_add(rack_rtm_prr_retran, 1);
12212 } else if (rack->r_ctl.rc_tlpsend) {
12213 /* Tail loss probe */
12219 * Check if we can do a TLP with a RACK'd packet
12220 * this can happen if we are not doing the rack
12221 * cheat and we skipped to a TLP and it
12224 rsm = rack->r_ctl.rc_tlpsend;
12225 rsm->r_flags |= RACK_TLP;
12226 rack->r_ctl.rc_tlpsend = NULL;
12228 tlen = rsm->r_end - rsm->r_start;
12231 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
12232 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
12233 __func__, __LINE__,
12234 rsm->r_start, tp->snd_una, tp, rack, rsm));
12235 sb_offset = rsm->r_start - tp->snd_una;
12236 cwin = min(tp->snd_wnd, tlen);
12240 * Enforce a connection sendmap count limit if set
12241 * as long as we are not retransmitting.
12243 if ((rsm == NULL) &&
12244 (rack->do_detection == 0) &&
12245 (V_tcp_map_entries_limit > 0) &&
12246 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
12247 counter_u64_add(rack_to_alloc_limited, 1);
12248 if (!rack->alloc_limit_reported) {
12249 rack->alloc_limit_reported = 1;
12250 counter_u64_add(rack_alloc_limited_conns, 1);
12252 goto just_return_nolock;
12254 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) {
12255 /* we are retransmitting the fin */
12259 * When retransmitting data do *not* include the
12260 * FIN. This could happen from a TLP probe.
12266 /* For debugging */
12267 rack->r_ctl.rc_rsm_at_retran = rsm;
12270 * Get standard flags, and add SYN or FIN if requested by 'hidden'
12273 if (tp->t_flags & TF_NEEDFIN)
12275 if (tp->t_flags & TF_NEEDSYN)
12277 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) {
12279 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
12281 kern_prefetch(end_rsm, &prefetch_rsm);
12286 * If snd_nxt == snd_max and we have transmitted a FIN, the
12287 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a
12288 * negative length. This can also occur when TCP opens up its
12289 * congestion window while receiving additional duplicate acks after
12290 * fast-retransmit because TCP will reset snd_nxt to snd_max after
12291 * the fast-retransmit.
12293 * In the normal retransmit-FIN-only case, however, snd_nxt will be
12294 * set to snd_una, the sb_offset will be 0, and the length may wind
12297 * If sack_rxmit is true we are retransmitting from the scoreboard
12298 * in which case len is already set.
12300 if ((sack_rxmit == 0) &&
12301 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) {
12304 avail = sbavail(sb);
12305 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail)
12306 sb_offset = tp->snd_nxt - tp->snd_una;
12309 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) {
12310 if (rack->r_ctl.rc_tlp_new_data) {
12311 /* TLP is forcing out new data */
12312 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) {
12313 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset);
12315 if (rack->r_ctl.rc_tlp_new_data > tp->snd_wnd)
12318 len = rack->r_ctl.rc_tlp_new_data;
12319 rack->r_ctl.rc_tlp_new_data = 0;
12320 new_data_tlp = doing_tlp = 1;
12322 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset);
12323 if (IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) {
12325 * For prr=off, we need to send only 1 MSS
12326 * at a time. We do this because another sack could
12327 * be arriving that causes us to send retransmits and
12328 * we don't want to be on a long pace due to a larger send
12329 * that keeps us from sending out the retransmit.
12334 uint32_t outstanding;
12337 * We are inside of a SACK recovery episode and are
12338 * sending new data, having retransmitted all the
12339 * data possible so far in the scoreboard.
12341 outstanding = tp->snd_max - tp->snd_una;
12342 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) {
12343 if (tp->snd_wnd > outstanding) {
12344 len = tp->snd_wnd - outstanding;
12345 /* Check to see if we have the data */
12346 if ((sb_offset + len) > avail) {
12347 /* It does not all fit */
12348 if (avail > sb_offset)
12349 len = avail - sb_offset;
12355 } else if (avail > sb_offset)
12356 len = avail - sb_offset;
12360 if (len > rack->r_ctl.rc_prr_sndcnt)
12361 len = rack->r_ctl.rc_prr_sndcnt;
12364 counter_u64_add(rack_rtm_prr_newdata, 1);
12367 if (len > segsiz) {
12369 * We should never send more than a MSS when
12370 * retransmitting or sending new data in prr
12371 * mode unless the override flag is on. Most
12372 * likely the PRR algorithm is not going to
12373 * let us send a lot as well :-)
12375 if (rack->r_ctl.rc_prr_sendalot == 0)
12377 } else if (len < segsiz) {
12379 * Do we send any? The idea here is if the
12380 * send empties the socket buffer we want to
12381 * do it. However, if not, then let's just wait
12382 * for our prr_sndcnt to get bigger.
12386 leftinsb = sbavail(sb) - sb_offset;
12387 if (leftinsb > len) {
12388 /* This send does not empty the sb */
12393 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) {
12395 * If you have not established
12396 * and are not doing FAST OPEN
12399 if ((sack_rxmit == 0) &&
12400 (!IS_FASTOPEN(tp->t_flags))){
12405 if (prefetch_so_done == 0) {
12406 kern_prefetch(so, &prefetch_so_done);
12407 prefetch_so_done = 1;
12410 * Lop off SYN bit if it has already been sent. However, if this is
12411 * SYN-SENT state and if segment contains data and if we don't know
12412 * that foreign host supports TAO, suppress sending segment.
12414 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) &&
12415 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) {
12417 * When sending additional segments following a TFO SYN|ACK,
12418 * do not include the SYN bit.
12420 if (IS_FASTOPEN(tp->t_flags) &&
12421 (tp->t_state == TCPS_SYN_RECEIVED))
12425 * Be careful not to send data and/or FIN on SYN segments. This
12426 * measure is needed to prevent interoperability problems with not
12427 * fully conformant TCP implementations.
12429 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
12434 * On TFO sockets, ensure no data is sent in the following cases:
12436 * - When retransmitting SYN|ACK on a passively-created socket
12438 * - When retransmitting SYN on an actively created socket
12440 * - When sending a zero-length cookie (cookie request) on an
12441 * actively created socket
12443 * - When the socket is in the CLOSED state (RST is being sent)
12445 if (IS_FASTOPEN(tp->t_flags) &&
12446 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
12447 ((tp->t_state == TCPS_SYN_SENT) &&
12448 (tp->t_tfo_client_cookie_len == 0)) ||
12449 (flags & TH_RST))) {
12453 /* Without fast-open there should never be data sent on a SYN */
12454 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) {
12455 tp->snd_nxt = tp->iss;
12461 * If FIN has been sent but not acked, but we haven't been
12462 * called to retransmit, len will be < 0. Otherwise, window
12463 * shrank after we sent into it. If window shrank to 0,
12464 * cancel pending retransmit, pull snd_nxt back to (closed)
12465 * window, and set the persist timer if it isn't already
12466 * going. If the window didn't close completely, just wait
12469 * We also do a general check here to ensure that we will
12470 * set the persist timer when we have data to send, but a
12471 * 0-byte window. This makes sure the persist timer is set
12472 * even if the packet hits one of the "goto send" lines
12476 if ((tp->snd_wnd == 0) &&
12477 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
12478 (tp->snd_una == tp->snd_max) &&
12479 (sb_offset < (int)sbavail(sb))) {
12480 tp->snd_nxt = tp->snd_una;
12481 rack_enter_persist(tp, rack, cts);
12483 } else if ((rsm == NULL) &&
12484 ((doing_tlp == 0) || (new_data_tlp == 1)) &&
12485 (len < rack->r_ctl.rc_pace_max_segs)) {
12487 * We are not sending a maximum sized segment for
12488 * some reason. Should we not send anything (think
12489 * sws or persists)?
12491 if ((tp->snd_wnd < min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), minseg)) &&
12492 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
12494 (len < (int)(sbavail(sb) - sb_offset))) {
12496 * Here the rwnd is less than
12497 * the minimum pacing size, this is not a retransmit,
12498 * we are established, and
12499 * the send is not the last in the socket buffer;
12500 * we send nothing, and we may enter persists
12501 * if nothing is outstanding.
12504 if (tp->snd_max == tp->snd_una) {
12506 * Nothing out we can
12507 * go into persists.
12509 rack_enter_persist(tp, rack, cts);
12510 tp->snd_nxt = tp->snd_una;
12512 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) &&
12513 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
12514 (len < (int)(sbavail(sb) - sb_offset)) &&
12517 * Here we are not retransmitting, and
12518 * the cwnd is not so small that we could
12519 * not send at least a min size (rxt timer
12520 * not having gone off), we have 2 segments or
12521 * more already in flight, it's not the tail end
12522 * of the socket buffer, and the cwnd is blocking
12523 * us from sending out a minimum pacing segment size.
12524 * Let's not send anything.
12527 } else if (((tp->snd_wnd - ctf_outstanding(tp)) <
12528 min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
12529 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
12530 (len < (int)(sbavail(sb) - sb_offset)) &&
12531 (TCPS_HAVEESTABLISHED(tp->t_state))) {
12533 * Here we have a send window but we have
12534 * filled it up and we can't send another pacing segment.
12535 * We also have in flight more than 2 segments
12536 * and we are not completing the sb i.e. we allow
12537 * the last bytes of the sb to go out even if
12538 * it's not a full pacing segment.
12543 /* len will be >= 0 after this point. */
12544 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
12545 tcp_sndbuf_autoscale(tp, so, min(tp->snd_wnd, cwnd_to_use));
12547 * Decide if we can use TCP Segmentation Offloading (if supported by
12550 * TSO may only be used if we are in a pure bulk sending state. The
12551 * presence of TCP-MD5, SACK retransmits, SACK advertisements, and IP
12552 * options prevent using TSO. With TSO the TCP header is the same
12553 * (except for the sequence number) for all generated packets. This
12554 * makes it impossible to transmit any options which vary per
12555 * generated segment or packet.
12557 * IPv4 handling has a clear separation of ip options and ip header
12558 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
12559 * the right thing below to provide length of just ip options and thus
12560 * checking for ipoptlen is enough to decide if ip options are present.
12565 ipoptlen = ip6_optlen(tp->t_inpcb);
12568 if (tp->t_inpcb->inp_options)
12569 ipoptlen = tp->t_inpcb->inp_options->m_len -
12570 offsetof(struct ipoption, ipopt_list);
12573 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
12575 * Pre-calculate here as we save another lookup into the darknesses
12576 * of IPsec that way and can actually decide if TSO is ok.
12579 if (isipv6 && IPSEC_ENABLED(ipv6))
12580 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb);
12586 if (IPSEC_ENABLED(ipv4))
12587 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb);
12591 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
12592 ipoptlen += ipsec_optlen;
12594 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz &&
12595 (tp->t_port == 0) &&
12596 ((tp->t_flags & TF_SIGNATURE) == 0) &&
12597 tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
12601 uint32_t outstanding;
12603 outstanding = tp->snd_max - tp->snd_una;
12604 if (tp->t_flags & TF_SENTFIN) {
12606 * If we sent a fin, snd_max is 1 higher than
12612 if ((rsm->r_flags & RACK_HAS_FIN) == 0)
12615 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
12620 recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
12621 (long)TCP_MAXWIN << tp->rcv_scale);
12624 * Sender silly window avoidance. We transmit under the following
12625 * conditions when len is non-zero:
12627 * - We have a full segment (or more with TSO) - This is the last
12628 * buffer in a write()/send() and we are either idle or running
12629 * NODELAY - we've timed out (e.g. persist timer) - we have more
12630 * than 1/2 the maximum send window's worth of data (receiver may be
12631 * limiting the window size) - we need to retransmit
12634 if (len >= segsiz) {
12638 * NOTE! on localhost connections an 'ack' from the remote
12639 * end may occur synchronously with the output and cause us
12640 * to flush a buffer queued with moretocome. XXX
12643 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */
12644 (idle || (tp->t_flags & TF_NODELAY)) &&
12645 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
12646 (tp->t_flags & TF_NOPUSH) == 0) {
12650 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */
12654 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
12658 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */
12666 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) &&
12667 (ctf_outstanding(tp) < (segsiz * 2))) {
12669 * We have less than two MSS outstanding (delayed ack)
12670 * and our rwnd will not let us send a full sized
12671 * MSS. Lets go ahead and let this small segment
12672 * out because we want to try to have at least two
12673 * packets inflight to not be caught by delayed ack.
12680 * Sending of standalone window updates.
12682 * Window updates are important when we close our window due to a
12683 * full socket buffer and are opening it again after the application
12684 * reads data from it. Once the window has opened again and the
12685 * remote end starts to send again the ACK clock takes over and
12686 * provides the most current window information.
12688 * We must avoid the silly window syndrome whereby every read from
12689 * the receive buffer, no matter how small, causes a window update
12690 * to be sent. We also should avoid sending a flurry of window
12691 * updates when the socket buffer had queued a lot of data and the
12692 * application is doing small reads.
12694 * Prevent a flurry of pointless window updates by only sending an
12695 * update when we can increase the advertised window by more than
12696 * 1/4th of the socket buffer capacity. When the buffer is getting
12697 * full or is very small be more aggressive and send an update
12698 * whenever we can increase by two mss sized segments. In all other
12699 * situations the ACK's to new incoming data will carry further
12700 * window increases.
12702 * Don't send an independent window update if a delayed ACK is
12703 * pending (it will get piggy-backed on it) or the remote side
12704 * already has done a half-close and won't send more data. Skip
12705 * this if the connection is in T/TCP half-open state.
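 *
 * For example (illustrative numbers only): with a 64 KB receive buffer
 * we send a standalone update only once we can advertise at least two
 * more segments and either the increase is 16 KB (1/4th of the buffer)
 * or the currently advertised window has fallen to 8 KB (1/8th) or less.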
12707 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
12708 !(tp->t_flags & TF_DELACK) &&
12709 !TCPS_HAVERCVDFIN(tp->t_state)) {
12711 * "adv" is the amount we could increase the window, taking
12712 * into account that we are limited by TCP_MAXWIN <<
12719 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
12720 oldwin = (tp->rcv_adv - tp->rcv_nxt);
12724 /* We can't increase the window */
12731 * If the new window size ends up being the same as or less
12732 * than the old size when it is scaled, then don't force
12735 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
12738 if (adv >= (int32_t)(2 * segsiz) &&
12739 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
12740 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) ||
12741 so->so_rcv.sb_hiwat <= 8 * segsiz)) {
12745 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) {
12753 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW
12754 * is also a catch-all for the retransmit timer timeout case.
12756 if (tp->t_flags & TF_ACKNOW) {
12760 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) {
12765 * If our state indicates that FIN should be sent and we have not
12766 * yet done so, then we need to send.
12768 if ((flags & TH_FIN) &&
12769 (tp->snd_nxt == tp->snd_una)) {
12774 * No reason to send a segment, just return.
12777 SOCKBUF_UNLOCK(sb);
12778 just_return_nolock:
12780 int app_limited = CTF_JR_SENT_DATA;
12782 if (tot_len_this_send > 0) {
12783 /* Make sure snd_nxt is up to max */
12784 if (SEQ_GT(tp->snd_max, tp->snd_nxt))
12785 tp->snd_nxt = tp->snd_max;
12786 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz);
12788 int end_window = 0;
12789 uint32_t seq = tp->gput_ack;
12791 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
12794 * Mark the last sent that we just-returned (hinting
12795 * that delayed ack may play a role in any rtt measurement).
12797 rsm->r_just_ret = 1;
12799 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1);
12800 rack->r_ctl.rc_agg_delayed = 0;
12803 rack->r_ctl.rc_agg_early = 0;
12804 if ((ctf_outstanding(tp) +
12805 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)),
12806 minseg)) >= tp->snd_wnd) {
12807 /* We are limited by the rwnd */
12808 app_limited = CTF_JR_RWND_LIMITED;
12809 } else if (ctf_outstanding(tp) >= sbavail(sb)) {
12810 /* We are limited by what's available -- app limited */
12811 app_limited = CTF_JR_APP_LIMITED;
12812 } else if ((idle == 0) &&
12813 ((tp->t_flags & TF_NODELAY) == 0) &&
12814 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
12817 * NODELAY is not on and the
12818 * user is sending less than 1 MSS. This
12819 * brings out SWS avoidance so we
12820 * don't send. Another app-limited case.
12822 app_limited = CTF_JR_APP_LIMITED;
12823 } else if (tp->t_flags & TF_NOPUSH) {
12825 * The user has requested no push of
12826 * the last segment and we are
12827 * at the last segment. Another app
12830 app_limited = CTF_JR_APP_LIMITED;
12831 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) {
12833 app_limited = CTF_JR_CWND_LIMITED;
12834 } else if (rack->rc_in_persist == 1) {
12835 /* We are in persists */
12836 app_limited = CTF_JR_PERSISTS;
12837 } else if (IN_RECOVERY(tp->t_flags) &&
12838 (rack->rack_no_prr == 0) &&
12839 (rack->r_ctl.rc_prr_sndcnt < segsiz)) {
12840 app_limited = CTF_JR_PRR;
12842 /* Now why here are we not sending? */
12845 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
12848 app_limited = CTF_JR_ASSESSING;
12851 * App limited in some fashion, for our pacing GP
12852 * measurements we don't want any gap (even cwnd).
12853 * Close down the measurement window.
12855 if (rack_cwnd_block_ends_measure &&
12856 ((app_limited == CTF_JR_CWND_LIMITED) ||
12857 (app_limited == CTF_JR_PRR))) {
12859 * The reason we are not sending is
12860 * the cwnd (or prr). We have been configured
12861 * to end the measurement window in
12865 } else if (app_limited == CTF_JR_PERSISTS) {
12867 * We never end the measurement window
12868 * in persists, though in theory we
12869 * should be only entering after everything
12870 * is acknowledged (so we will probably
12871 * never come here).
12874 } else if (rack_rwnd_block_ends_measure &&
12875 (app_limited == CTF_JR_RWND_LIMITED)) {
12877 * We are rwnd limited and have been
12878 * configured to end the measurement
12879 * window in this case.
12882 } else if (app_limited == CTF_JR_APP_LIMITED) {
12884 * A true application limited period, we have
12888 } else if (app_limited == CTF_JR_ASSESSING) {
12890 * In the assessing case we hit the end of
12891 * the if/else and had no known reason.
12892 * This will panic us under invariants.
12894 * If we get this out in logs we need to
12895 * investigate which reason we missed.
12902 if ((tp->t_flags & TF_GPUTINPROG) &&
12903 SEQ_GT(tp->gput_ack, tp->snd_max)) {
12904 /* Mark the last packet as app limited */
12905 tp->gput_ack = tp->snd_max;
12908 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
12909 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
12910 if (rack->r_ctl.rc_app_limited_cnt == 0)
12911 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
12914 * Go out to the end app limited and mark
12915 * this new one as next and move the end_appl up
12918 if (rack->r_ctl.rc_end_appl)
12919 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
12920 rack->r_ctl.rc_end_appl = rsm;
12922 rsm->r_flags |= RACK_APP_LIMITED;
12923 rack->r_ctl.rc_app_limited_cnt++;
12926 rack_log_pacing_delay_calc(rack,
12927 rack->r_ctl.rc_app_limited_cnt, seq,
12928 tp->gput_ack, 0, 0, 4, __LINE__, NULL);
12932 /* set the rack tcb into the slot N */
12933 counter_u64_add(rack_paced_segments, 1);
12934 } else if (tot_len_this_send) {
12935 counter_u64_add(rack_unpaced_segments, 1);
12937 /* Check if we need to go into persists or not */
12938 if ((rack->rc_in_persist == 0) &&
12939 (tp->snd_max == tp->snd_una) &&
12940 TCPS_HAVEESTABLISHED(tp->t_state) &&
12942 (sbavail(sb) > tp->snd_wnd) &&
12943 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) {
12944 /* Yes, let's make sure to move to persist before timer-start */
12945 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
12947 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack);
12948 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use);
12950 #ifdef NETFLIX_SHARED_CWND
12951 if ((sbavail(sb) == 0) &&
12952 rack->r_ctl.rc_scw) {
12953 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
12954 rack->rack_scwnd_is_idle = 1;
12960 if ((flags & TH_FIN) &&
12963 * We do not transmit a FIN
12964 * with data outstanding. We
12965 * need to make it so all data
12970 /* Enforce stack imposed max seg size if we have one */
12971 if (rack->r_ctl.rc_pace_max_segs &&
12972 (len > rack->r_ctl.rc_pace_max_segs)) {
12974 len = rack->r_ctl.rc_pace_max_segs;
12976 SOCKBUF_LOCK_ASSERT(sb);
12979 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
12981 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
12984 * Before ESTABLISHED, force sending of initial options unless TCP
12985 * set not to do any options. NOTE: we assume that the IP/TCP header
12986 * plus TCP options always fit in a single mbuf, leaving room for a
12987 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr)
12988 * + optlen <= MCLBYTES
12993 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
12996 hdrlen = sizeof(struct tcpiphdr);
12999 * Compute options for segment. We only have to care about SYN and
13000 * established connection segments. Options for SYN-ACK segments
13001 * are handled in TCP syncache.
13004 if ((tp->t_flags & TF_NOOPT) == 0) {
13005 /* Maximum segment size. */
13006 if (flags & TH_SYN) {
13007 tp->snd_nxt = tp->iss;
13008 to.to_mss = tcp_mssopt(&inp->inp_inc);
13009 #ifdef NETFLIX_TCPOUDP
13011 to.to_mss -= V_tcp_udp_tunneling_overhead;
13013 to.to_flags |= TOF_MSS;
13016 * On SYN or SYN|ACK transmits on TFO connections,
13017 * only include the TFO option if it is not a
13018 * retransmit, as the presence of the TFO option may
13019 * have caused the original SYN or SYN|ACK to have
13020 * been dropped by a middlebox.
13022 if (IS_FASTOPEN(tp->t_flags) &&
13023 (tp->t_rxtshift == 0)) {
13024 if (tp->t_state == TCPS_SYN_RECEIVED) {
13025 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
13027 (u_int8_t *)&tp->t_tfo_cookie.server;
13028 to.to_flags |= TOF_FASTOPEN;
13030 } else if (tp->t_state == TCPS_SYN_SENT) {
13032 tp->t_tfo_client_cookie_len;
13034 tp->t_tfo_cookie.client;
13035 to.to_flags |= TOF_FASTOPEN;
13038 * If we wind up having more data to
13039 * send with the SYN than can fit in
13040 * one segment, don't send any more
13041 * until the SYN|ACK comes back from
13048 /* Window scaling. */
13049 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
13050 to.to_wscale = tp->request_r_scale;
13051 to.to_flags |= TOF_SCALE;
13054 if ((tp->t_flags & TF_RCVD_TSTMP) ||
13055 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
13056 to.to_tsval = cts + tp->ts_offset;
13057 to.to_tsecr = tp->ts_recent;
13058 to.to_flags |= TOF_TS;
13060 /* Set receive buffer autosizing timestamp. */
13061 if (tp->rfbuf_ts == 0 &&
13062 (so->so_rcv.sb_flags & SB_AUTOSIZE))
13063 tp->rfbuf_ts = tcp_ts_getticks();
13064 /* Selective ACK's. */
13065 if (flags & TH_SYN)
13066 to.to_flags |= TOF_SACKPERM;
13067 else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
13068 tp->rcv_numsacks > 0) {
13069 to.to_flags |= TOF_SACK;
13070 to.to_nsacks = tp->rcv_numsacks;
13071 to.to_sacks = (u_char *)tp->sackblks;
13073 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
13074 /* TCP-MD5 (RFC2385). */
13075 if (tp->t_flags & TF_SIGNATURE)
13076 to.to_flags |= TOF_SIGNATURE;
13077 #endif /* TCP_SIGNATURE */
13079 /* Processing the options. */
13080 hdrlen += optlen = tcp_addoptions(&to, opt);
13082 * If we wanted a TFO option to be added, but it was unable
13083 * to fit, ensure no data is sent.
13085 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie &&
13086 !(to.to_flags & TOF_FASTOPEN))
13089 #ifdef NETFLIX_TCPOUDP
13091 if (V_tcp_udp_tunneling_port == 0) {
13092 /* The port was removed?? */
13093 SOCKBUF_UNLOCK(&so->so_snd);
13094 return (EHOSTUNREACH);
13096 hdrlen += sizeof(struct udphdr);
13101 ipoptlen = ip6_optlen(tp->t_inpcb);
13104 if (tp->t_inpcb->inp_options)
13105 ipoptlen = tp->t_inpcb->inp_options->m_len -
13106 offsetof(struct ipoption, ipopt_list);
13109 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
13110 ipoptlen += ipsec_optlen;
13114 * Adjust data length if insertion of options will bump the packet
13115 * length beyond the t_maxseg length. Clear the FIN bit because we
13116 * cut off the tail of the segment.
13118 if (len + optlen + ipoptlen > tp->t_maxseg) {
13120 uint32_t if_hw_tsomax;
13124 /* extract TSO information */
13125 if_hw_tsomax = tp->t_tsomax;
13126 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
13127 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
13128 KASSERT(ipoptlen == 0,
13129 ("%s: TSO can't do IP options", __func__));
13132 * Check if we should limit by maximum payload
13135 if (if_hw_tsomax != 0) {
13136 /* compute maximum TSO length */
13137 max_len = (if_hw_tsomax - hdrlen -
13139 if (max_len <= 0) {
13141 } else if (len > max_len) {
13148 * Prevent the last segment from being fractional
13149 * unless the send sockbuf can be emptied:
13151 max_len = (tp->t_maxseg - optlen);
13152 if ((sb_offset + len) < sbavail(sb)) {
13153 moff = len % (u_int)max_len;
13160 * In case there are too many small fragments don't
13163 if (len <= segsiz) {
13168 * Send the FIN in a separate segment after the bulk
13169 * sending is done. We don't trust the TSO
13170 * implementations to clear the FIN flag on all but
13171 * the last segment.
13173 if (tp->t_flags & TF_NEEDFIN) {
13178 if (optlen + ipoptlen >= tp->t_maxseg) {
13180 * Since we don't have enough space to put
13181 * the IP header chain and the TCP header in
13182 * one packet as required by RFC 7112, don't
13183 * send it. Also ensure that at least one
13184 * byte of the payload can be put into the
13187 SOCKBUF_UNLOCK(&so->so_snd);
13192 len = tp->t_maxseg - optlen - ipoptlen;
13199 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
13200 ("%s: len > IP_MAXPACKET", __func__));
13203 if (max_linkhdr + hdrlen > MCLBYTES)
13205 if (max_linkhdr + hdrlen > MHLEN)
13207 panic("tcphdr too big");
13211 * This KASSERT is here to catch edge cases at a well defined place.
13212 * Before, those had triggered (random) panic conditions further
13215 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
13217 (flags & TH_FIN) &&
13220 * We have outstanding data, don't send a FIN by itself!
13225 * Grab a header mbuf, attaching a copy of data to be transmitted,
13226 * and initialize the header from the template for sends on this
13233 if (rack->r_ctl.rc_pace_max_segs)
13234 max_val = rack->r_ctl.rc_pace_max_segs;
13235 else if (rack->rc_user_set_max_segs)
13236 max_val = rack->rc_user_set_max_segs * segsiz;
13240 * We allow a limit on sending with hptsi.
13242 if (len > max_val) {
13247 if (MHLEN < hdrlen + max_linkhdr)
13248 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
13251 m = m_gethdr(M_NOWAIT, MT_DATA);
13254 SOCKBUF_UNLOCK(sb);
13259 m->m_data += max_linkhdr;
13263 * Start the m_copy functions from the closest mbuf to the
13264 * sb_offset in the socket buffer chain.
13266 mb = sbsndptr_noadv(sb, sb_offset, &moff);
13267 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) {
13268 m_copydata(mb, moff, (int)len,
13269 mtod(m, caddr_t)+hdrlen);
13270 if (SEQ_LT(tp->snd_nxt, tp->snd_max))
13271 sbsndptr_adv(sb, mb, len);
13274 struct sockbuf *msb;
13276 if (SEQ_LT(tp->snd_nxt, tp->snd_max))
13280 m->m_next = tcp_m_copym(
13282 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb,
13283 ((rsm == NULL) ? hw_tls : 0)
13284 #ifdef NETFLIX_COPY_ARGS
13288 if (len <= (tp->t_maxseg - optlen)) {
13290 * Must have run out of mbufs for the copy;
13291 * shorten it to no longer need TSO. Let's
13292 * not put on sendalot since we are low on
13297 if (m->m_next == NULL) {
13298 SOCKBUF_UNLOCK(sb);
13305 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
13306 if (rsm && (rsm->r_flags & RACK_TLP)) {
13308 * TLP should not count in retran count, but
13311 counter_u64_add(rack_tlp_retran, 1);
13312 counter_u64_add(rack_tlp_retran_bytes, len);
13314 tp->t_sndrexmitpack++;
13315 KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
13316 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
13319 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
13323 KMOD_TCPSTAT_INC(tcps_sndpack);
13324 KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
13326 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
13331 * If we're sending everything we've got, set PUSH. (This
13332 * will keep happy those implementations which only give
13333 * data to the user when a buffer fills or a PUSH comes in.)
13335 if (sb_offset + len == sbused(sb) &&
13340 SOCKBUF_UNLOCK(sb);
13342 SOCKBUF_UNLOCK(sb);
13343 if (tp->t_flags & TF_ACKNOW)
13344 KMOD_TCPSTAT_INC(tcps_sndacks);
13345 else if (flags & (TH_SYN | TH_FIN | TH_RST))
13346 KMOD_TCPSTAT_INC(tcps_sndctrl);
13348 KMOD_TCPSTAT_INC(tcps_sndwinup);
13350 m = m_gethdr(M_NOWAIT, MT_DATA);
13357 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
13359 M_ALIGN(m, hdrlen);
13362 m->m_data += max_linkhdr;
13365 SOCKBUF_UNLOCK_ASSERT(sb);
13366 m->m_pkthdr.rcvif = (struct ifnet *)0;
13368 mac_inpcb_create_mbuf(inp, m);
13372 ip6 = mtod(m, struct ip6_hdr *);
13373 #ifdef NETFLIX_TCPOUDP
13375 udp = (struct udphdr *)((caddr_t)ip6 + ipoptlen + sizeof(struct ip6_hdr));
13376 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
13377 udp->uh_dport = tp->t_port;
13378 ulen = hdrlen + len - sizeof(struct ip6_hdr);
13379 udp->uh_ulen = htons(ulen);
13380 th = (struct tcphdr *)(udp + 1);
13383 th = (struct tcphdr *)(ip6 + 1);
13384 tcpip_fillheaders(inp,
13385 #ifdef NETFLIX_TCPOUDP
13392 ip = mtod(m, struct ip *);
13394 ipov = (struct ipovly *)ip;
13396 #ifdef NETFLIX_TCPOUDP
13398 udp = (struct udphdr *)((caddr_t)ip + ipoptlen + sizeof(struct ip));
13399 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
13400 udp->uh_dport = tp->t_port;
13401 ulen = hdrlen + len - sizeof(struct ip);
13402 udp->uh_ulen = htons(ulen);
13403 th = (struct tcphdr *)(udp + 1);
13406 th = (struct tcphdr *)(ip + 1);
13407 tcpip_fillheaders(inp,
13408 #ifdef NETFLIX_TCPOUDP
13414 * Fill in fields, remembering maximum advertised window for use in
13415 * delaying messages about window sizes. If resending a FIN, be sure
13416 * not to use a new sequence number.
13418 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
13419 tp->snd_nxt == tp->snd_max)
13422 * If we are starting a connection, send ECN setup SYN packet. If we
13423 * are on a retransmit, we may resend those bits a number of times
13426 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) {
13427 if (tp->t_rxtshift >= 1) {
13428 if (tp->t_rxtshift <= V_tcp_ecn_maxretries)
13429 flags |= TH_ECE | TH_CWR;
13431 flags |= TH_ECE | TH_CWR;
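/*
 * Net effect: the ECN-setup SYN carries ECE|CWR; if that SYN has to be
 * retransmitted, the bits are re-sent only while t_rxtshift is still
 * <= V_tcp_ecn_maxretries, so a path that drops ECN-setup SYNs
 * eventually sees a plain SYN retransmission.
 */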
13433 /* Handle parallel SYN for ECN */
13434 if ((tp->t_state == TCPS_SYN_RECEIVED) &&
13435 (tp->t_flags2 & TF2_ECN_SND_ECE)) {
13437 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
13439 if (tp->t_state == TCPS_ESTABLISHED &&
13440 (tp->t_flags2 & TF2_ECN_PERMIT)) {
13442 * If the peer has ECN, mark data packets with ECN capable
13443 * transmission (ECT). Ignore pure ack packets,
13446 if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) &&
13447 (sack_rxmit == 0)) {
13450 ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
13453 ip->ip_tos |= IPTOS_ECN_ECT0;
13454 KMOD_TCPSTAT_INC(tcps_ecn_ect0);
13456 * Reply with proper ECN notifications.
13457 * Only set CWR on new data segments.
13459 if (tp->t_flags2 & TF2_ECN_SND_CWR) {
13461 tp->t_flags2 &= ~TF2_ECN_SND_CWR;
13464 if (tp->t_flags2 & TF2_ECN_SND_ECE)
13468 * If we are doing retransmissions, then snd_nxt will not reflect
13469 * the first unsent octet. For ACK only packets, we do not want the
13470 * sequence number of the retransmitted packet, we want the sequence
13471 * number of the next unsent octet. So, if there is no data (and no
13472 * SYN or FIN), use snd_max instead of snd_nxt when filling in
13473 * ti_seq. But if we are in persist state, snd_max might reflect
13474 * one byte beyond the right edge of the window, so use snd_nxt in
13475 * that case, since we know we aren't doing a retransmission.
13476 * (retransmit and persist are mutually exclusive...)
13478 if (sack_rxmit == 0) {
13479 if (len || (flags & (TH_SYN | TH_FIN)) ||
13480 rack->rc_in_persist) {
13481 th->th_seq = htonl(tp->snd_nxt);
13482 rack_seq = tp->snd_nxt;
13483 } else if (flags & TH_RST) {
13485 * For a Reset, send the last cumulative ack in sequence
13486 * (this, like any other choice, may still generate a
13487 * challenge ack, if an ack-update packet is in
13490 th->th_seq = htonl(tp->snd_una);
13491 rack_seq = tp->snd_una;
13493 th->th_seq = htonl(tp->snd_max);
13494 rack_seq = tp->snd_max;
13497 th->th_seq = htonl(rsm->r_start);
13498 rack_seq = rsm->r_start;
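/*
 * Summary of the sequence number choice above:
 *   - SACK/RACK retransmission (rsm != NULL): use rsm->r_start.
 *   - data, SYN/FIN, or persist probe:        use snd_nxt.
 *   - RST:                                    use snd_una (last cum-ack).
 *   - pure ACK/window update otherwise:       use snd_max.
 */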
13500 th->th_ack = htonl(tp->rcv_nxt);
13502 bcopy(opt, th + 1, optlen);
13503 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
13505 th->th_flags = flags;
13507 * Calculate receive window. Don't shrink window, but avoid silly
13509 * If a RST segment is sent, advertise a window of zero.
13511 if (flags & TH_RST) {
13514 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
13515 recwin < (long)segsiz)
13517 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
13518 recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
13519 recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
13523 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or
13524 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is
13525 * handled in syncache.
13527 if (flags & TH_SYN)
13528 th->th_win = htons((u_short)
13529 (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
13531 /* Avoid shrinking window with window scaling. */
13532 recwin = roundup2(recwin, 1 << tp->rcv_scale);
13533 th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
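/*
 * Example of the roundup above: with rcv_scale == 7 the window is
 * advertised in units of 128 bytes, so a recwin of 65700 is rounded
 * up to 65792 (514 << 7) rather than being truncated by the right
 * shift, which would otherwise shrink the offered window.
 */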
13536 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0
13537 * window. This may cause the remote transmitter to stall. This
13538 * flag tells soreceive() to disable delayed acknowledgements when
13539 * draining the buffer. This can occur if the receiver is
13540 * attempting to read more data than can be buffered prior to
13541 * transmitting on the connection.
13543 if (th->th_win == 0) {
13544 tp->t_sndzerowin++;
13545 tp->t_flags |= TF_RXWIN0SENT;
13547 tp->t_flags &= ~TF_RXWIN0SENT;
13548 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */
13550 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
13551 if (to.to_flags & TOF_SIGNATURE) {
13553 * Calculate MD5 signature and put it into the place
13554 * determined before.
13555 * NOTE: since TCP options buffer doesn't point into
13556 * mbuf's data, calculate offset and use it.
13558 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
13559 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
13561 * Do not send segment if the calculation of MD5
13562 * digest has failed.
13570 * Put TCP length in extended header, and then checksum extended
13573 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
13577 * ip6_plen need not be filled now; it will be filled
13581 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
13582 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
13583 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
13584 th->th_sum = htons(0);
13585 UDPSTAT_INC(udps_opackets);
13587 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
13588 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
13589 th->th_sum = in6_cksum_pseudo(ip6,
13590 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
13595 #if defined(INET6) && defined(INET)
13601 m->m_pkthdr.csum_flags = CSUM_UDP;
13602 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
13603 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
13604 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
13605 th->th_sum = htons(0);
13606 UDPSTAT_INC(udps_opackets);
13608 m->m_pkthdr.csum_flags = CSUM_TCP;
13609 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
13610 th->th_sum = in_pseudo(ip->ip_src.s_addr,
13611 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
13612 IPPROTO_TCP + len + optlen));
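/*
 * The offload pattern is the same for both address families: the
 * pseudo-header sum is pre-computed into the checksum field named by
 * csum_data (th_sum, or uh_sum when tunneling over UDP), and
 * csum_flags tells the NIC, or the software fallback in the IP output
 * path, to fold the payload into that field before transmission.
 */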
13614 /* IP version must be set here for ipv4/ipv6 checking later */
13615 KASSERT(ip->ip_v == IPVERSION,
13616 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
13620 * Enable TSO and specify the size of the segments. The TCP pseudo
13621 * header checksum is always provided. XXX: Fixme: This is currently
13622 * not the case for IPv6.
13625 KASSERT(len > tp->t_maxseg - optlen,
13626 ("%s: len <= tso_segsz", __func__));
13627 m->m_pkthdr.csum_flags |= CSUM_TSO;
13628 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
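/*
 * tso_segsz is the on-wire MSS the hardware should use when it splits
 * this large send: the connection's t_maxseg minus the TCP options
 * carried on this segment, so every generated segment has room for
 * the replicated headers.
 */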
13630 KASSERT(len + hdrlen == m_length(m, NULL),
13631 ("%s: mbuf chain different than expected: %d + %u != %u",
13632 __func__, len, hdrlen, m_length(m, NULL)));
13635 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
13636 hhook_run_tcp_est_out(tp, th, &to, len, tso);
13642 if (so->so_options & SO_DEBUG) {
13649 save = ipov->ih_len;
13650 ipov->ih_len = htons(m->m_pkthdr.len /* - hdrlen +
13651 * (th->th_off << 2) */ );
13653 tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
13657 ipov->ih_len = save;
13659 #endif /* TCPDEBUG */
13661 /* We're getting ready to send; log now. */
13662 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
13663 union tcp_log_stackspecific log;
13666 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
13667 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
13668 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
13669 if (rack->rack_no_prr)
13670 log.u_bbr.flex1 = 0;
13672 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
13673 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
13674 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
13675 log.u_bbr.flex4 = orig_len;
13677 log.u_bbr.flex5 = 0x80000000;
13679 log.u_bbr.flex5 = 0;
13680 /* Save off the early/late values */
13681 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
13682 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
13683 log.u_bbr.bw_inuse = rack_get_bw(rack);
13684 if (rsm || sack_rxmit) {
13686 log.u_bbr.flex8 = 2;
13688 log.u_bbr.flex8 = 1;
13690 log.u_bbr.flex8 = 0;
13692 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
13693 log.u_bbr.flex7 = mark;
13694 log.u_bbr.pkts_out = tp->t_maxseg;
13695 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
13696 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
13697 log.u_bbr.lt_epoch = cwnd_to_use;
13698 log.u_bbr.delivered = sendalot;
13699 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
13700 len, &log, false, NULL, NULL, 0, &tv);
13705 * Fill in IP length and desired time to live and send to IP level.
13706 * There should be a better way to handle ttl and tos; we could keep
13707 * them in the template, but need a way to checksum without them.
13710 * m->m_pkthdr.len should have been set before cksum calculation,
13711 * because in6_cksum() needs it.
13716 * we separately set hoplimit for every segment, since the
13717 * user might want to change the value via setsockopt. Also,
13718 * desired default hop limit might be changed via Neighbor
13721 ip6->ip6_hlim = in6_selecthlim(inp, NULL);
13724 * Set the packet size here for the benefit of DTrace
13725 * probes. ip6_output() will set it properly; it's supposed
13726 * to include the option header lengths as well.
13728 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
13730 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
13731 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
13733 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
13735 if (tp->t_state == TCPS_SYN_SENT)
13736 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);
13738 TCP_PROBE5(send, NULL, tp, ip6, tp, th);
13739 /* TODO: IPv6 IP6TOS_ECT bit on */
13740 error = ip6_output(m, inp->in6p_outputopts,
13742 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0),
13745 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
13746 mtu = inp->inp_route6.ro_nh->nh_mtu;
13749 #if defined(INET) && defined(INET6)
13754 ip->ip_len = htons(m->m_pkthdr.len);
13756 if (inp->inp_vflag & INP_IPV6PROTO)
13757 ip->ip_ttl = in6_selecthlim(inp, NULL);
13760 * If we do path MTU discovery, then we set DF on every
13761 * packet. This might not be the best thing to do according
13762 * to RFC3390 Section 2. However, the tcp hostcache mitigates
13763 * the problem so it affects only the first tcp connection
13766 * NB: Don't set DF on small MTU/MSS to have a safe
13769 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
13770 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
13771 if (tp->t_port == 0 || len < V_tcp_minmss) {
13772 ip->ip_off |= htons(IP_DF);
13775 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
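/*
 * Summary of the DF policy above: with path MTU discovery enabled and
 * t_maxseg above the minmss floor, DF is set for all ordinary sends
 * and for small UDP-tunneled segments; larger tunneled segments are
 * left fragmentable as the safe fallback the note above refers to.
 */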
13778 if (tp->t_state == TCPS_SYN_SENT)
13779 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);
13781 TCP_PROBE5(send, NULL, tp, ip, tp, th);
13783 error = ip_output(m, inp->inp_options, &inp->inp_route,
13784 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0,
13786 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
13787 mtu = inp->inp_route.ro_nh->nh_mtu;
13793 lgb->tlb_errno = error;
13797 * In transmit state, time the transmission and arrange for the
13798 * retransmit. In persist state, just set snd_max.
13801 rack->forced_ack = 0; /* If we send something zap the FA flag */
13802 if (rsm && (doing_tlp == 0)) {
13803 /* Set we retransmitted */
13804 rack->rc_gp_saw_rec = 1;
13806 if (cwnd_to_use > tp->snd_ssthresh) {
13807 /* Set we sent in CA */
13808 rack->rc_gp_saw_ca = 1;
13810 /* Set we sent in SS */
13811 rack->rc_gp_saw_ss = 1;
13814 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
13815 (tp->t_flags & TF_SACK_PERMIT) &&
13816 tp->rcv_numsacks > 0)
13817 tcp_clean_dsack_blocks(tp);
13818 tot_len_this_send += len;
13820 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1);
13821 else if (len == 1) {
13822 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1);
13823 } else if (len > 1) {
13826 idx = (len / segsiz) + 3;
13827 if (idx >= TCP_MSS_ACCT_ATIMER)
13828 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
13830 counter_u64_add(rack_out_size[idx], 1);
13833 if (rack->rack_no_prr == 0) {
13834 if (sub_from_prr && (error == 0)) {
13835 if (rack->r_ctl.rc_prr_sndcnt >= len)
13836 rack->r_ctl.rc_prr_sndcnt -= len;
13838 rack->r_ctl.rc_prr_sndcnt = 0;
13842 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, cts,
13843 pass, rsm, us_cts);
13844 if ((error == 0) &&
13846 (tp->snd_una == tp->snd_max))
13847 rack->r_ctl.rc_tlp_rxt_last_time = cts;
13848 /* Now are we in persists? */
13849 if (rack->rc_in_persist == 0) {
13850 tcp_seq startseq = tp->snd_nxt;
13852 /* Track our lost count */
13853 if (rsm && (doing_tlp == 0))
13854 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start;
13856 * Advance snd_nxt over sequence space of this segment.
13859 /* We don't log or do anything with errors */
13861 if (doing_tlp == 0) {
13864 * Not a retransmission of any
13865 * sort; new data is going out, so
13866 * clear our TLP count and flag.
13868 rack->rc_tlp_in_progress = 0;
13869 rack->r_ctl.rc_tlp_cnt_out = 0;
13873 * We have just sent a TLP; mark that fact
13874 * and make sure our in-progress flag is set so we
13875 * continue to check the count.
13877 rack->rc_tlp_in_progress = 1;
13878 rack->r_ctl.rc_tlp_cnt_out++;
13880 if (flags & (TH_SYN | TH_FIN)) {
13881 if (flags & TH_SYN)
13883 if (flags & TH_FIN) {
13885 tp->t_flags |= TF_SENTFIN;
13888 /* In the ENOBUFS case we do *not* update snd_max */
13892 tp->snd_nxt += len;
13893 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
13894 if (tp->snd_una == tp->snd_max) {
13896 * Update the time we just added data since
13897 * none was outstanding.
13899 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
13900 tp->t_acktime = ticks;
13902 tp->snd_max = tp->snd_nxt;
13904 * Time this transmission if not a retransmission and
13905 * not currently timing anything.
13906 * This is only relevant in case of switching back to
13909 if (tp->t_rtttime == 0) {
13910 tp->t_rtttime = ticks;
13911 tp->t_rtseq = startseq;
13912 KMOD_TCPSTAT_INC(tcps_segstimed);
13915 ((tp->t_flags & TF_GPUTINPROG) == 0))
13916 rack_start_gp_measurement(tp, rack, startseq, sb_offset);
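/*
 * startseq captured above anchors both the classic RTT sample
 * (t_rtseq/t_rtttime) and, when no goodput measurement is already in
 * progress, the start of a new one via rack_start_gp_measurement().
 */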
13920 * Persist case, update snd_max but since we are in persist
13921 * mode (no window) we do not update snd_nxt.
13923 int32_t xlen = len;
13928 if (flags & TH_SYN)
13930 if (flags & TH_FIN) {
13932 tp->t_flags |= TF_SENTFIN;
13934 /* In the ENOBUFS case we do *not* update snd_max */
13935 if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max)) {
13936 if (tp->snd_una == tp->snd_max) {
13938 * Update the time we just added data since
13939 * none was outstanding.
13941 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
13942 tp->t_acktime = ticks;
13944 tp->snd_max = tp->snd_nxt + len;
13949 rack->r_ctl.rc_agg_delayed = 0;
13952 rack->r_ctl.rc_agg_early = 0;
13953 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */
13955 * Failures do not advance the seq counter above. For the
13956 * case of ENOBUFS we will fall out and retry in 1ms with
13957 * the hpts. Everything else will just have to retransmit
13960 * In any case, we do not want to loop around for another
13961 * send without a good reason.
13966 tp->t_softerror = error;
13971 * Pace us right away to retry in a short
13974 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
13975 if (rack->rc_enobuf < 126)
13977 if (slot > ((rack->rc_rack_rtt / 2) * HPTS_USEC_IN_MSEC)) {
13978 slot = (rack->rc_rack_rtt / 2) * HPTS_USEC_IN_MSEC;
13980 if (slot < (10 * HPTS_USEC_IN_MSEC))
13981 slot = 10 * HPTS_USEC_IN_MSEC;
13983 counter_u64_add(rack_saw_enobuf, 1);
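/*
 * Worked example of the ENOBUFS backoff above: with rc_enobuf == 4 on
 * entry the initial slot is 5ms; it is capped at half of rc_rack_rtt
 * (20ms for a 40ms rack RTT) and then raised to the 10ms floor, so we
 * retry on the hpts in 10ms.
 */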
13988 * For some reason the interface we used initially
13989 * to send segments changed to another or lowered
13990 * its MTU. If TSO was active we either got an
13991 * interface without TSO capabilities or TSO was
13992 * turned off. If we obtained mtu from ip_output()
13993 * then update it and try again.
13996 tp->t_flags &= ~TF_TSO;
13998 tcp_mss_update(tp, -1, mtu, NULL, NULL);
14001 slot = 10 * HPTS_USEC_IN_MSEC;
14002 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
14005 counter_u64_add(rack_saw_enetunreach, 1);
14009 if (TCPS_HAVERCVDSYN(tp->t_state)) {
14010 tp->t_softerror = error;
14014 slot = 10 * HPTS_USEC_IN_MSEC;
14015 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
14019 rack->rc_enobuf = 0;
14021 KMOD_TCPSTAT_INC(tcps_sndtotal);
14024 * Data sent (as far as we can tell). If this advertises a larger
14025 * window than any other segment, then remember the size of the
14026 * advertised window. Any pending ACK has now been sent.
14028 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
14029 tp->rcv_adv = tp->rcv_nxt + recwin;
14030 tp->last_ack_sent = tp->rcv_nxt;
14031 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
14033 /* Ensure that when we leave, snd_nxt points to the top */
14034 if (SEQ_GT(tp->snd_max, tp->snd_nxt))
14035 tp->snd_nxt = tp->snd_max;
14037 /* Do we need to turn off sendalot? */
14038 if (rack->r_ctl.rc_pace_max_segs &&
14039 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) {
14040 /* We hit our max. */
14042 } else if ((rack->rc_user_set_max_segs) &&
14043 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) {
14044 /* We hit the user defined max */
14048 if ((error == 0) && (flags & TH_FIN))
14049 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN);
14050 if (flags & TH_RST) {
14052 * We don't send again after sending a RST.
14057 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
14058 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) {
14060 * Get our pacing rate; if an error
14061 * occurred in sending (ENOBUFS) we would
14062 * hit the else-if with slot preset. Other
14065 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz);
14068 rack->use_rack_rr) {
14069 /* It's a retransmit and we use the rack cheat? */
14071 (rack->rc_always_pace == 0) ||
14072 (rack->r_rr_config == 1)) {
14074 * We have no pacing set or we
14075 * are using old-style rack or
14076 * we are overridden to use the old 1ms pacing.
14078 slot = rack->r_ctl.rc_min_to * HPTS_USEC_IN_MSEC;
14082 /* set the rack tcb into the slot N */
14083 counter_u64_add(rack_paced_segments, 1);
14084 } else if (sendalot) {
14086 counter_u64_add(rack_unpaced_segments, 1);
14090 counter_u64_add(rack_unpaced_segments, 1);
14092 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0);
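/*
 * Slot selection recap: a normal send asks rack_get_pacing_delay() how
 * long this burst should take at the current pacing rate, while a
 * retransmission under use_rack_rr falls back to spacing sends
 * rc_min_to milliseconds apart when no proper pacing rate applies.
 */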
14097 rack_update_seg(struct tcp_rack *rack)
14101 orig_val = rack->r_ctl.rc_pace_max_segs;
14102 rack_set_pace_segments(rack->rc_tp, rack, __LINE__);
14103 if (orig_val != rack->r_ctl.rc_pace_max_segs)
14104 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL);
14108 * rack_ctloutput() must drop the inpcb lock before performing copyin on
14109 * socket option arguments. When it re-acquires the lock after the copy, it
14110 * has to revalidate that the connection is still valid for the socket
14114 rack_set_sockopt(struct socket *so, struct sockopt *sopt,
14115 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
14117 struct epoch_tracker et;
14119 int32_t error = 0, optval;
14122 switch (sopt->sopt_name) {
14123 case TCP_RACK_PROP_RATE: /* URL:prop_rate */
14124 case TCP_RACK_PROP: /* URL:prop */
14125 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */
14126 case TCP_RACK_EARLY_RECOV: /* URL:early_recov */
14127 case TCP_RACK_PACE_REDUCE: /* Not used */
14128 /* Pacing related ones */
14129 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */
14130 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */
14131 case TCP_BBR_IWINTSO: /* URL:tso_iwin */
14132 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */
14133 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */
14134 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */
14135 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss */
14136 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */
14137 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */
14138 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */
14139 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */
14140 case TCP_RACK_RR_CONF: /* URL:rrr_conf */
14141 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */
14142 /* End pacing related */
14144 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */
14145 case TCP_RACK_MIN_TO: /* URL:min_to */
14146 case TCP_RACK_EARLY_SEG: /* URL:early_seg */
14147 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */
14148 case TCP_RACK_REORD_FADE: /* URL:reord_fade */
14149 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */
14150 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */
14151 case TCP_RACK_TLP_USE: /* URL:tlp_use */
14152 case TCP_RACK_TLP_INC_VAR: /* URL:tlp_inc_var */
14153 case TCP_RACK_IDLE_REDUCE_HIGH: /* URL:idle_reduce_high */
14154 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */
14155 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */
14156 case TCP_RACK_DO_DETECTION: /* URL:detect */
14157 case TCP_NO_PRR: /* URL:noprr */
14158 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */
14159 case TCP_DATA_AFTER_CLOSE:
14160 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */
14161 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */
14162 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */
14163 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */
14164 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */
14165 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */
14166 case TCP_RACK_PROFILE: /* URL:profile */
14169 return (tcp_default_ctloutput(so, sopt, inp, tp));
14173 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
14177 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
14179 return (ECONNRESET);
14181 tp = intotcpcb(inp);
14182 rack = (struct tcp_rack *)tp->t_fb_ptr;
14183 switch (sopt->sopt_name) {
14184 case TCP_RACK_PROFILE:
14185 RACK_OPTS_INC(tcp_profile);
14187 /* pace_always=1 */
14188 rack->rc_always_pace = 1;
14189 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
14191 rack->rack_enable_scwnd = 1;
14193 rack->rc_gp_dyn_mul = 1;
14194 rack->r_ctl.rack_per_of_gp_ca = 100;
14196 rack->r_rr_config = 3;
14198 rack->r_ctl.rc_no_push_at_mrtt = 2;
14200 rack->rc_pace_to_cwnd = 1;
14201 rack->rc_pace_fill_if_rttin_range = 0;
14202 rack->rtt_limit_mul = 0;
14204 rack->rack_no_prr = 1;
14206 rack->r_limit_scw = 1;
14207 } else if (optval == 2) {
14208 /* pace_always=1 */
14209 rack->rc_always_pace = 1;
14210 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
14212 rack->rack_enable_scwnd = 1;
14214 rack->rc_gp_dyn_mul = 1;
14215 rack->r_ctl.rack_per_of_gp_ca = 100;
14217 rack->r_rr_config = 3;
14219 rack->r_ctl.rc_no_push_at_mrtt = 2;
14221 rack->rc_pace_to_cwnd = 1;
14222 rack->rc_pace_fill_if_rttin_range = 0;
14223 rack->rtt_limit_mul = 0;
14225 rack->rack_no_prr = 1;
14227 rack->r_limit_scw = 0;
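/*
 * The profiles are canned option bundles: both branches above enable
 * pacing, the shared cwnd, dynamic timely gp multipliers, rr_config 3,
 * no-push-at-max, fill-the-cwnd pacing and no-PRR; the only difference
 * shown is that profile 1 time-limits the shared cwnd (r_limit_scw)
 * while profile 2 does not.
 */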
14230 case TCP_SHARED_CWND_TIME_LIMIT:
14231 RACK_OPTS_INC(tcp_lscwnd);
14233 rack->r_limit_scw = 1;
14235 rack->r_limit_scw = 0;
14237 case TCP_RACK_PACE_TO_FILL:
14238 RACK_OPTS_INC(tcp_fillcw);
14240 rack->rc_pace_to_cwnd = 0;
14242 rack->rc_pace_to_cwnd = 1;
14243 if ((optval >= rack_gp_rtt_maxmul) &&
14244 rack_gp_rtt_maxmul &&
14246 rack->rc_pace_fill_if_rttin_range = 1;
14247 rack->rtt_limit_mul = optval;
14249 rack->rc_pace_fill_if_rttin_range = 0;
14250 rack->rtt_limit_mul = 0;
14253 case TCP_RACK_NO_PUSH_AT_MAX:
14254 RACK_OPTS_INC(tcp_npush);
14256 rack->r_ctl.rc_no_push_at_mrtt = 0;
14257 else if (optval < 0xff)
14258 rack->r_ctl.rc_no_push_at_mrtt = optval;
14262 case TCP_SHARED_CWND_ENABLE:
14263 RACK_OPTS_INC(tcp_rack_scwnd);
14265 rack->rack_enable_scwnd = 0;
14267 rack->rack_enable_scwnd = 1;
14269 case TCP_RACK_MBUF_QUEUE:
14270 /* Now do we use the LRO mbuf-queue feature */
14271 RACK_OPTS_INC(tcp_rack_mbufq);
14273 rack->r_mbuf_queue = 1;
14275 rack->r_mbuf_queue = 0;
14276 if (rack->r_mbuf_queue || rack->rc_always_pace)
14277 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
14279 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
14281 case TCP_RACK_NONRXT_CFG_RATE:
14282 RACK_OPTS_INC(tcp_rack_cfg_rate);
14284 rack->rack_rec_nonrxt_use_cr = 0;
14286 rack->rack_rec_nonrxt_use_cr = 1;
14289 RACK_OPTS_INC(tcp_rack_noprr);
14291 rack->rack_no_prr = 0;
14293 rack->rack_no_prr = 1;
14295 case TCP_TIMELY_DYN_ADJ:
14296 RACK_OPTS_INC(tcp_timely_dyn);
14298 rack->rc_gp_dyn_mul = 0;
14300 rack->rc_gp_dyn_mul = 1;
14301 if (optval >= 100) {
14303 * If the user sets a value of 100 or more,
14304 * it's the gp_ca value.
14306 rack->r_ctl.rack_per_of_gp_ca = optval;
14310 case TCP_RACK_DO_DETECTION:
14311 RACK_OPTS_INC(tcp_rack_do_detection);
14313 rack->do_detection = 0;
14315 rack->do_detection = 1;
14317 case TCP_RACK_PROP_RATE:
14318 if ((optval <= 0) || (optval >= 100)) {
14322 RACK_OPTS_INC(tcp_rack_prop_rate);
14323 rack->r_ctl.rc_prop_rate = optval;
14325 case TCP_RACK_TLP_USE:
14326 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) {
14330 RACK_OPTS_INC(tcp_tlp_use);
14331 rack->rack_tlp_threshold_use = optval;
14333 case TCP_RACK_PROP:
14334 /* RACK proportional rate reduction (bool) */
14335 RACK_OPTS_INC(tcp_rack_prop);
14336 rack->r_ctl.rc_prop_reduce = optval;
14338 case TCP_RACK_TLP_REDUCE:
14339 /* RACK TLP cwnd reduction (bool) */
14340 RACK_OPTS_INC(tcp_rack_tlp_reduce);
14341 rack->r_ctl.rc_tlp_cwnd_reduce = optval;
14343 case TCP_RACK_EARLY_RECOV:
14344 /* Should recovery happen early (bool) */
14345 RACK_OPTS_INC(tcp_rack_early_recov);
14346 rack->r_ctl.rc_early_recovery = optval;
14349 /* Pacing related ones */
14350 case TCP_RACK_PACE_ALWAYS:
14352 * zero is old rack method, 1 is new
14353 * method using a pacing rate.
14355 RACK_OPTS_INC(tcp_rack_pace_always);
14357 rack->rc_always_pace = 1;
14359 rack->rc_always_pace = 0;
14360 if (rack->r_mbuf_queue || rack->rc_always_pace)
14361 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
14363 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
14364 /* A rate may be set (irate or other); if so, set the seg size */
14365 rack_update_seg(rack);
14367 case TCP_BBR_RACK_INIT_RATE:
14368 RACK_OPTS_INC(tcp_initial_rate);
14370 /* Change from kbits per second to bytes per second */
14373 rack->r_ctl.init_rate = val;
14374 if (rack->rc_init_win != rack_default_init_window) {
14378 * Options don't always get applied
14379 * in the order you think. So in order
14380 * to ensure we update the cwnd we need
14381 * to check and see if we are still
14382 * where we should raise the cwnd.
14384 win = rc_init_window(rack);
14385 if (SEQ_GT(tp->snd_max, tp->iss))
14386 snt = tp->snd_max - tp->iss;
14390 (tp->snd_cwnd < win))
14391 tp->snd_cwnd = win;
14393 if (rack->rc_always_pace)
14394 rack_update_seg(rack);
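/*
 * The kbits-to-bytes conversion noted above means, for example, that
 * an optval of 10000 (10 Mbit/s) is stored as an init_rate of
 * 1250000 bytes per second; the get path later converts it back to
 * kbits for the caller.
 */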
14396 case TCP_BBR_IWINTSO:
14397 RACK_OPTS_INC(tcp_initial_win);
14398 if (optval && (optval <= 0xff)) {
14401 rack->rc_init_win = optval;
14402 win = rc_init_window(rack);
14403 if (SEQ_GT(tp->snd_max, tp->iss))
14404 snt = tp->snd_max - tp->iss;
14409 #ifdef NETFLIX_PEAKRATE
14410 tp->t_maxpeakrate |
14412 rack->r_ctl.init_rate)) {
14414 * We are not past the initial window
14415 * and we have some bases for pacing,
14416 * so we need to possibly adjust up
14417 * the cwnd. Note even if we don't set
14418 * the cwnd, it's still ok to raise the rc_init_win
14419 * which can be used coming out of idle when we
14420 * would have a rate.
14422 if (tp->snd_cwnd < win)
14423 tp->snd_cwnd = win;
14425 if (rack->rc_always_pace)
14426 rack_update_seg(rack);
14430 case TCP_RACK_FORCE_MSEG:
14431 RACK_OPTS_INC(tcp_rack_force_max_seg);
14433 rack->rc_force_max_seg = 1;
14435 rack->rc_force_max_seg = 0;
14437 case TCP_RACK_PACE_MAX_SEG:
14438 /* Max segments size in a pace in bytes */
14439 RACK_OPTS_INC(tcp_rack_max_seg);
14440 rack->rc_user_set_max_segs = optval;
14441 rack_set_pace_segments(tp, rack, __LINE__);
14443 case TCP_RACK_PACE_RATE_REC:
14444 /* Set the fixed pacing rate in Bytes per second for recovery */
14445 RACK_OPTS_INC(tcp_rack_pace_rate_rec);
14446 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
14447 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
14448 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
14449 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
14450 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
14451 rack->use_fixed_rate = 1;
14452 rack_log_pacing_delay_calc(rack,
14453 rack->r_ctl.rc_fixed_pacing_rate_ss,
14454 rack->r_ctl.rc_fixed_pacing_rate_ca,
14455 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
14459 case TCP_RACK_PACE_RATE_SS:
14460 /* Set the fixed pacing rate in Bytes per second for slow start */
14461 RACK_OPTS_INC(tcp_rack_pace_rate_ss);
14462 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
14463 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
14464 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
14465 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
14466 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
14467 rack->use_fixed_rate = 1;
14468 rack_log_pacing_delay_calc(rack,
14469 rack->r_ctl.rc_fixed_pacing_rate_ss,
14470 rack->r_ctl.rc_fixed_pacing_rate_ca,
14471 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
14475 case TCP_RACK_PACE_RATE_CA:
14476 /* Set the fixed pacing rate in Bytes per second ca */
14477 RACK_OPTS_INC(tcp_rack_pace_rate_ca);
14478 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
14479 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
14480 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
14481 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
14482 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
14483 rack->use_fixed_rate = 1;
14484 rack_log_pacing_delay_calc(rack,
14485 rack->r_ctl.rc_fixed_pacing_rate_ss,
14486 rack->r_ctl.rc_fixed_pacing_rate_ca,
14487 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
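/*
 * All three fixed-rate options above share one pattern: the value
 * becomes that phase's rate and also seeds whichever of the other two
 * phases is still zero, so setting a single rate fixes slow start,
 * congestion avoidance and recovery at once until they are set
 * individually.
 */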
14490 case TCP_RACK_GP_INCREASE_REC:
14491 RACK_OPTS_INC(tcp_gp_inc_rec);
14492 rack->r_ctl.rack_per_of_gp_rec = optval;
14493 rack_log_pacing_delay_calc(rack,
14494 rack->r_ctl.rack_per_of_gp_ss,
14495 rack->r_ctl.rack_per_of_gp_ca,
14496 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
14499 case TCP_RACK_GP_INCREASE_CA:
14500 RACK_OPTS_INC(tcp_gp_inc_ca);
14504 * We don't allow any reduction
14510 rack->r_ctl.rack_per_of_gp_ca = ca;
14511 rack_log_pacing_delay_calc(rack,
14512 rack->r_ctl.rack_per_of_gp_ss,
14513 rack->r_ctl.rack_per_of_gp_ca,
14514 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
14517 case TCP_RACK_GP_INCREASE_SS:
14518 RACK_OPTS_INC(tcp_gp_inc_ss);
14522 * We don't allow any reduction
14528 rack->r_ctl.rack_per_of_gp_ss = ss;
14529 rack_log_pacing_delay_calc(rack,
14530 rack->r_ctl.rack_per_of_gp_ss,
14531 rack->r_ctl.rack_per_of_gp_ca,
14532 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
14535 case TCP_RACK_RR_CONF:
14536 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate);
14537 if (optval && optval <= 3)
14538 rack->r_rr_config = optval;
14540 rack->r_rr_config = 0;
14542 case TCP_BBR_HDWR_PACE:
14543 RACK_OPTS_INC(tcp_hdwr_pacing);
14545 if (rack->rack_hdrw_pacing == 0) {
14546 rack->rack_hdw_pace_ena = 1;
14547 rack->rack_attempt_hdwr_pace = 0;
14551 rack->rack_hdw_pace_ena = 0;
14553 if (rack->rack_hdrw_pacing) {
14554 rack->rack_hdrw_pacing = 0;
14555 in_pcbdetach_txrtlmt(rack->rc_inp);
14560 /* End Pacing related ones */
14561 case TCP_RACK_PRR_SENDALOT:
14562 /* Allow PRR to send more than one seg */
14563 RACK_OPTS_INC(tcp_rack_prr_sendalot);
14564 rack->r_ctl.rc_prr_sendalot = optval;
14566 case TCP_RACK_MIN_TO:
14567 /* Minimum time between rack t-o's in ms */
14568 RACK_OPTS_INC(tcp_rack_min_to);
14569 rack->r_ctl.rc_min_to = optval;
14571 case TCP_RACK_EARLY_SEG:
14572 /* If early recovery max segments */
14573 RACK_OPTS_INC(tcp_rack_early_seg);
14574 rack->r_ctl.rc_early_recovery_segs = optval;
14576 case TCP_RACK_REORD_THRESH:
14577 /* RACK reorder threshold (shift amount) */
14578 RACK_OPTS_INC(tcp_rack_reord_thresh);
14579 if ((optval > 0) && (optval < 31))
14580 rack->r_ctl.rc_reorder_shift = optval;
14584 case TCP_RACK_REORD_FADE:
14585 /* Does the reordering state fade after this many ms */
14586 RACK_OPTS_INC(tcp_rack_reord_fade);
14587 rack->r_ctl.rc_reorder_fade = optval;
14589 case TCP_RACK_TLP_THRESH:
14590 /* RACK TLP threshold i.e. srtt+(srtt/N) */
14591 RACK_OPTS_INC(tcp_rack_tlp_thresh);
14593 rack->r_ctl.rc_tlp_threshold = optval;
14597 case TCP_BBR_USE_RACK_RR:
14598 RACK_OPTS_INC(tcp_rack_rr);
14600 rack->use_rack_rr = 1;
14602 rack->use_rack_rr = 0;
14604 case TCP_RACK_PKT_DELAY:
14605 /* RACK added ms i.e. rack-rtt + reord + N */
14606 RACK_OPTS_INC(tcp_rack_pkt_delay);
14607 rack->r_ctl.rc_pkt_delay = optval;
14609 case TCP_RACK_TLP_INC_VAR:
14610 /* Does TLP include rtt variance in t-o */
14613 case TCP_RACK_IDLE_REDUCE_HIGH:
14618 tp->t_delayed_ack = 0;
14620 tp->t_delayed_ack = 1;
14621 if (tp->t_flags & TF_DELACK) {
14622 tp->t_flags &= ~TF_DELACK;
14623 tp->t_flags |= TF_ACKNOW;
14624 NET_EPOCH_ENTER(et);
14626 NET_EPOCH_EXIT(et);
14630 case TCP_BBR_RACK_RTT_USE:
14631 if ((optval != USE_RTT_HIGH) &&
14632 (optval != USE_RTT_LOW) &&
14633 (optval != USE_RTT_AVG))
14636 rack->r_ctl.rc_rate_sample_method = optval;
14638 case TCP_DATA_AFTER_CLOSE:
14640 rack->rc_allow_data_af_clo = 1;
14642 rack->rc_allow_data_af_clo = 0;
14644 case TCP_RACK_PACE_REDUCE:
14645 /* sysctl only now */
14649 return (tcp_default_ctloutput(so, sopt, inp, tp));
14652 #ifdef NETFLIX_STATS
14653 tcp_log_socket_option(tp, sopt->sopt_name, optval, error);
14660 rack_get_sockopt(struct socket *so, struct sockopt *sopt,
14661 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
14663 int32_t error, optval;
14666 * Because all our options are either boolean or an int, we can just
14667 * pull everything into optval and then unlock and copy. If we ever
14668 * add an option that is not an int, then this will have quite an
14669 * impact on this routine.
14672 switch (sopt->sopt_name) {
14673 case TCP_RACK_PROFILE:
14674 /* You cannot retrieve a profile, it's write only */
14677 case TCP_RACK_PACE_TO_FILL:
14678 optval = rack->rc_pace_to_cwnd;
14680 case TCP_RACK_NO_PUSH_AT_MAX:
14681 optval = rack->r_ctl.rc_no_push_at_mrtt;
14683 case TCP_SHARED_CWND_ENABLE:
14684 optval = rack->rack_enable_scwnd;
14686 case TCP_RACK_NONRXT_CFG_RATE:
14687 optval = rack->rack_rec_nonrxt_use_cr;
14690 optval = rack->rack_no_prr;
14692 case TCP_RACK_DO_DETECTION:
14693 optval = rack->do_detection;
14695 case TCP_RACK_MBUF_QUEUE:
14696 /* Now do we use the LRO mbuf-queue feature */
14697 optval = rack->r_mbuf_queue;
14699 case TCP_TIMELY_DYN_ADJ:
14700 optval = rack->rc_gp_dyn_mul;
14702 case TCP_BBR_IWINTSO:
14703 optval = rack->rc_init_win;
14705 case TCP_RACK_PROP_RATE:
14706 optval = rack->r_ctl.rc_prop_rate;
14708 case TCP_RACK_PROP:
14709 /* RACK proportional rate reduction (bool) */
14710 optval = rack->r_ctl.rc_prop_reduce;
14712 case TCP_RACK_TLP_REDUCE:
14713 /* RACK TLP cwnd reduction (bool) */
14714 optval = rack->r_ctl.rc_tlp_cwnd_reduce;
14716 case TCP_RACK_EARLY_RECOV:
14717 /* Should recovery happen early (bool) */
14718 optval = rack->r_ctl.rc_early_recovery;
14720 case TCP_RACK_PACE_REDUCE:
14721 /* RACK Hptsi reduction factor (divisor) */
14724 case TCP_BBR_RACK_INIT_RATE:
14725 val = rack->r_ctl.init_rate;
14726 /* convert to kbits per sec */
14729 optval = (uint32_t)val;
14731 case TCP_RACK_FORCE_MSEG:
14732 optval = rack->rc_force_max_seg;
14734 case TCP_RACK_PACE_MAX_SEG:
14735 /* Max segments in a pace */
14736 optval = rack->rc_user_set_max_segs;
14738 case TCP_RACK_PACE_ALWAYS:
14739 /* Use the always pace method */
14740 optval = rack->rc_always_pace;
14742 case TCP_RACK_PRR_SENDALOT:
14743 /* Allow PRR to send more than one seg */
14744 optval = rack->r_ctl.rc_prr_sendalot;
14746 case TCP_RACK_MIN_TO:
14747 /* Minimum time between rack t-o's in ms */
14748 optval = rack->r_ctl.rc_min_to;
14750 case TCP_RACK_EARLY_SEG:
14751 /* If early recovery max segments */
14752 optval = rack->r_ctl.rc_early_recovery_segs;
14754 case TCP_RACK_REORD_THRESH:
14755 /* RACK reorder threshold (shift amount) */
14756 optval = rack->r_ctl.rc_reorder_shift;
14758 case TCP_RACK_REORD_FADE:
14759 /* Does the reordering state fade after this many ms */
14760 optval = rack->r_ctl.rc_reorder_fade;
14762 case TCP_BBR_USE_RACK_RR:
14763 /* Do we use the rack cheat for rxt */
14764 optval = rack->use_rack_rr;
14766 case TCP_RACK_RR_CONF:
14767 optval = rack->r_rr_config;
14769 case TCP_BBR_HDWR_PACE:
14770 optval = rack->rack_hdw_pace_ena;
14772 case TCP_RACK_TLP_THRESH:
14773 /* RACK TLP threshold i.e. srtt+(srtt/N) */
14774 optval = rack->r_ctl.rc_tlp_threshold;
14776 case TCP_RACK_PKT_DELAY:
14777 /* RACK added ms i.e. rack-rtt + reord + N */
14778 optval = rack->r_ctl.rc_pkt_delay;
14780 case TCP_RACK_TLP_USE:
14781 optval = rack->rack_tlp_threshold_use;
14783 case TCP_RACK_TLP_INC_VAR:
14784 /* Does TLP include rtt variance in t-o */
14787 case TCP_RACK_IDLE_REDUCE_HIGH:
14790 case TCP_RACK_PACE_RATE_CA:
14791 optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
14793 case TCP_RACK_PACE_RATE_SS:
14794 optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
14796 case TCP_RACK_PACE_RATE_REC:
14797 optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
14799 case TCP_RACK_GP_INCREASE_SS:
14800 optval = rack->r_ctl.rack_per_of_gp_ca;
14802 case TCP_RACK_GP_INCREASE_CA:
14803 optval = rack->r_ctl.rack_per_of_gp_ss;
14805 case TCP_BBR_RACK_RTT_USE:
14806 optval = rack->r_ctl.rc_rate_sample_method;
14809 optval = tp->t_delayed_ack;
14811 case TCP_DATA_AFTER_CLOSE:
14812 optval = rack->rc_allow_data_af_clo;
14814 case TCP_SHARED_CWND_TIME_LIMIT:
14815 optval = rack->r_limit_scw;
14818 return (tcp_default_ctloutput(so, sopt, inp, tp));
14823 error = sooptcopyout(sopt, &optval, sizeof optval);
14829 rack_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp)
14831 int32_t error = EINVAL;
14832 struct tcp_rack *rack;
14834 rack = (struct tcp_rack *)tp->t_fb_ptr;
14835 if (rack == NULL) {
14839 if (sopt->sopt_dir == SOPT_SET) {
14840 return (rack_set_sockopt(so, sopt, inp, tp, rack));
14841 } else if (sopt->sopt_dir == SOPT_GET) {
14842 return (rack_get_sockopt(so, sopt, inp, tp, rack));
14850 rack_pru_options(struct tcpcb *tp, int flags)
14852 if (flags & PRUS_OOB)
14853 return (EOPNOTSUPP);
14857 static struct tcp_function_block __tcp_rack = {
14858 .tfb_tcp_block_name = __XSTRING(STACKNAME),
14859 .tfb_tcp_output = rack_output,
14860 .tfb_do_queued_segments = ctf_do_queued_segments,
14861 .tfb_do_segment_nounlock = rack_do_segment_nounlock,
14862 .tfb_tcp_do_segment = rack_do_segment,
14863 .tfb_tcp_ctloutput = rack_ctloutput,
14864 .tfb_tcp_fb_init = rack_init,
14865 .tfb_tcp_fb_fini = rack_fini,
14866 .tfb_tcp_timer_stop_all = rack_stopall,
14867 .tfb_tcp_timer_activate = rack_timer_activate,
14868 .tfb_tcp_timer_active = rack_timer_active,
14869 .tfb_tcp_timer_stop = rack_timer_stop,
14870 .tfb_tcp_rexmit_tmr = rack_remxt_tmr,
14871 .tfb_tcp_handoff_ok = rack_handoff_ok,
14872 .tfb_pru_options = rack_pru_options,
14875 static const char *rack_stack_names[] = {
14876 __XSTRING(STACKNAME),
14878 __XSTRING(STACKALIAS),
14883 rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
14885 memset(mem, 0, size);
14890 rack_dtor(void *mem, int32_t size, void *arg)
14895 static bool rack_mod_inited = false;
14898 tcp_addrack(module_t mod, int32_t type, void *data)
14905 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
14906 sizeof(struct rack_sendmap),
14907 rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
14909 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
14910 sizeof(struct tcp_rack),
14911 rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
14913 sysctl_ctx_init(&rack_sysctl_ctx);
14914 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
14915 SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
14918 __XSTRING(STACKALIAS),
14920 __XSTRING(STACKNAME),
14922 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
14924 if (rack_sysctl_root == NULL) {
14925 printf("Failed to add sysctl node\n");
14929 rack_init_sysctls();
14930 num_stacks = nitems(rack_stack_names);
14931 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
14932 rack_stack_names, &num_stacks);
14934 printf("Failed to register %s stack name for "
14935 "%s module\n", rack_stack_names[num_stacks],
14936 __XSTRING(MODNAME));
14937 sysctl_ctx_free(&rack_sysctl_ctx);
14939 uma_zdestroy(rack_zone);
14940 uma_zdestroy(rack_pcb_zone);
14941 rack_counter_destroy();
14942 printf("Failed to register rack module -- err:%d\n", err);
14945 tcp_lro_reg_mbufq();
14946 rack_mod_inited = true;
14949 err = deregister_tcp_functions(&__tcp_rack, true, false);
14952 err = deregister_tcp_functions(&__tcp_rack, false, true);
14955 if (rack_mod_inited) {
14956 uma_zdestroy(rack_zone);
14957 uma_zdestroy(rack_pcb_zone);
14958 sysctl_ctx_free(&rack_sysctl_ctx);
14959 rack_counter_destroy();
14960 rack_mod_inited = false;
14962 tcp_lro_dereg_mbufq();
14966 return (EOPNOTSUPP);
14971 static moduledata_t tcp_rack = {
14972 .name = __XSTRING(MODNAME),
14973 .evhand = tcp_addrack,
14977 MODULE_VERSION(MODNAME, 1);
14978 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
14979 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);