/*
 * Copyright (c) 2016-2020 Netflix, Inc.
 *
4 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
31 #include "opt_inet6.h"
32 #include "opt_ipsec.h"
33 #include "opt_tcpdebug.h"
34 #include "opt_ratelimit.h"
35 #include "opt_kern_tls.h"
36 #include <sys/param.h>
38 #include <sys/module.h>
39 #include <sys/kernel.h>
41 #include <sys/hhook.h>
44 #include <sys/malloc.h>
46 #include <sys/mutex.h>
48 #include <sys/proc.h> /* for proc0 declaration */
49 #include <sys/socket.h>
50 #include <sys/socketvar.h>
54 #include <sys/sysctl.h>
55 #include <sys/systm.h>
57 #include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h> /* Must come after qmath.h and tree.h */
63 #include <sys/refcount.h>
64 #include <sys/queue.h>
65 #include <sys/tim_filter.h>
67 #include <sys/kthread.h>
68 #include <sys/kern_prefetch.h>
69 #include <sys/protosw.h>
73 #include <net/route.h>
74 #include <net/route/nhop.h>
77 #define TCPSTATES /* for logging */
79 #include <netinet/in.h>
80 #include <netinet/in_kdtrace.h>
81 #include <netinet/in_pcb.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
84 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
85 #include <netinet/ip_var.h>
86 #include <netinet/ip6.h>
87 #include <netinet6/in6_pcb.h>
88 #include <netinet6/ip6_var.h>
89 #include <netinet/tcp.h>
91 #include <netinet/tcp_fsm.h>
92 #include <netinet/tcp_log_buf.h>
93 #include <netinet/tcp_seq.h>
94 #include <netinet/tcp_timer.h>
95 #include <netinet/tcp_var.h>
96 #include <netinet/tcp_hpts.h>
97 #include <netinet/tcp_ratelimit.h>
98 #include <netinet/tcpip.h>
99 #include <netinet/cc/cc.h>
100 #include <netinet/tcp_fastopen.h>
101 #include <netinet/tcp_lro.h>
102 #ifdef NETFLIX_SHARED_CWND
#include <netinet/tcp_shared_cwnd.h>
#endif
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
107 #endif /* TCPDEBUG */
109 #include <netinet/tcp_offload.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
115 #include <netipsec/ipsec_support.h>
117 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
118 #include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif /* IPSEC */
122 #include <netinet/udp.h>
123 #include <netinet/udp_var.h>
124 #include <machine/in_cksum.h>
#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
129 #include "sack_filter.h"
130 #include "tcp_rack.h"
131 #include "rack_bbr_common.h"
133 uma_zone_t rack_zone;
134 uma_zone_t rack_pcb_zone;
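/* Convert a count of kernel ticks into sbintime_t for the timer APIs. */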
137 #define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t)))
140 struct sysctl_ctx_list rack_sysctl_ctx;
141 struct sysctl_oid *rack_sysctl_root;
/*
 * The RACK module incorporates a number of
148 * TCP ideas that have been put out into the IETF
149 * over the last few years:
150 * - Matt Mathis's Rate Halving which slowly drops
151 * the congestion window so that the ack clock can
152 * be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named) that
 * will stop us using the number of dup acks and instead
 * use time as the gauge of when we retransmit.
 * - Reorder Detection of RFC 4737 and the Tail-Loss probe draft
 * of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
163 * To implement RACK the original TCP stack was first decomposed
164 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports SACK
 * initially and then assure that the RACK state matches the connection
 * state before calling the state's do_segment function. Each
169 * state is simplified due to the fact that the original do_segment
170 * has been decomposed and we *know* what state we are in (no
171 * switches on the state) and all tests for SACK are gone. This
172 * greatly simplifies what each state does.
174 * TCP output is also over-written with a new version since it
 * must maintain the new rack scoreboard.
 */
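/*
 * A minimal sketch of that dispatch idea (illustrative only, not part of
 * the build): once rack_set_state() has picked the handler for the current
 * TCP state, input processing is an indirect call rather than a switch:
 *
 *	rack->r_substate = rack_do_established;
 *	...
 *	retval = (*rack->r_substate)(m, th, so, tp, &to,
 *	    drop_hdrlen, tlen, tiwin, thflags, nxt_pkt, iptos);
 */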
178 static int32_t rack_tlp_thresh = 1;
179 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */
180 static int32_t rack_tlp_use_greater = 1;
181 static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000;	/* 0 - never fade, def 60,000
						 * ms (1 minute) */
184 /* Attack threshold detections */
185 static uint32_t rack_highest_sack_thresh_seen = 0;
186 static uint32_t rack_highest_move_thresh_seen = 0;
188 static int32_t rack_pkt_delay = 1;
189 static int32_t rack_early_recovery = 1;
190 static int32_t rack_send_a_lot_in_prr = 1;
191 static int32_t rack_min_to = 1; /* Number of ms minimum timeout */
192 static int32_t rack_verbose_logging = 0;
193 static int32_t rack_ignore_data_after_close = 1;
194 static int32_t rack_enable_shared_cwnd = 0;
195 static int32_t rack_limits_scwnd = 1;
196 static int32_t rack_enable_mqueue_for_nonpaced = 0;
197 static int32_t rack_disable_prr = 0;
198 static int32_t use_rack_rr = 1;
199 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */
200 static int32_t rack_persist_min = 250; /* 250ms */
201 static int32_t rack_persist_max = 2000; /* 2 Second */
202 static int32_t rack_sack_not_required = 0; /* set to one to allow non-sack to use rack */
203 static int32_t rack_hw_tls_max_seg = 3; /* 3 means use hw-tls single segment */
204 static int32_t rack_default_init_window = 0; /* Use system default */
205 static int32_t rack_limit_time_with_srtt = 0;
206 static int32_t rack_hw_pace_adjust = 0;
/*
 * Currently regular tcp has a rto_min of 30ms
 * the backoff goes 12 times so that ends up
 * being a total of 122.850 seconds before a
 * connection is killed.
 */
213 static uint32_t rack_def_data_window = 20;
214 static uint32_t rack_goal_bdp = 2;
215 static uint32_t rack_min_srtts = 1;
216 static uint32_t rack_min_measure_usec = 0;
217 static int32_t rack_tlp_min = 10;
218 static int32_t rack_rto_min = 30; /* 30ms same as main freebsd */
219 static int32_t rack_rto_max = 4000; /* 4 seconds */
220 static const int32_t rack_free_cache = 2;
221 static int32_t rack_hptsi_segments = 40;
222 static int32_t rack_rate_sample_method = USE_RTT_LOW;
223 static int32_t rack_pace_every_seg = 0;
224 static int32_t rack_delayed_ack_time = 200; /* 200ms */
225 static int32_t rack_slot_reduction = 4;
226 static int32_t rack_wma_divisor = 8; /* For WMA calculation */
227 static int32_t rack_cwnd_block_ends_measure = 0;
228 static int32_t rack_rwnd_block_ends_measure = 0;
230 static int32_t rack_lower_cwnd_at_tlp = 0;
231 static int32_t rack_use_proportional_reduce = 0;
232 static int32_t rack_proportional_rate = 10;
233 static int32_t rack_tlp_max_resend = 2;
234 static int32_t rack_limited_retran = 0;
235 static int32_t rack_always_send_oldest = 0;
236 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;
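/*
 * The rack_per_of_gp_* values below are percentages of measured goodput:
 * 100 means pace at exactly the measured b/w, 250 means pace at 2.5x it.
 */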
238 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */
239 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */
240 static uint16_t rack_per_of_gp_rec = 200; /* 200 % of bw */
243 static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */
244 static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */
245 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */
246 static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */
static uint16_t rack_atexit_prtt = 130;	/* Clamp to 130% on exit prtt if non highly buffered path */
static uint32_t rack_max_drain_wait = 2;	/* How many gp srtt's before we give up draining */
250 static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */
251 static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */
252 static uint32_t rack_probertt_use_min_rtt_exit = 0;
253 static uint32_t rack_probe_rtt_sets_cwnd = 0;
254 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */
255 static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in us */
static uint32_t rack_probertt_gpsrtt_cnt_mul = 0;	/* How many srtt periods does probe-rtt last (top of fraction) */
static uint32_t rack_probertt_gpsrtt_cnt_div = 0;	/* How many srtt periods does probe-rtt last (bottom of fraction) */
258 static uint32_t rack_min_probertt_hold = 200000; /* Equal to delayed ack time */
259 static uint32_t rack_probertt_filter_life = 10000000;
260 static uint32_t rack_probertt_lower_within = 10;
261 static uint32_t rack_min_rtt_movement = 250; /* Must move at least 250 useconds to count as a lowering */
static int32_t rack_pace_one_seg = 0;	/* Shall we pace 1MSS at a time when b/w is below 1.4Meg */
263 static int32_t rack_probertt_clear_is = 1;
264 static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */
static int32_t rack_hbp_thresh = 3;	/* what is the divisor max_rtt/min_rtt to decide a hbp */
269 static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */
271 /* Timely information */
/* Combining these two gives the range of 'no change' to b/w, */
/* i.e. the up/down values provide the upper and lower bound */
274 static int32_t rack_gp_per_bw_mul_up = 2; /* 2% */
275 static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */
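/*
 * Example: with mul_up = 2 and mul_down = 4, a new b/w measurement that
 * lands within -4%..+2% of the previous one counts as "no change".
 */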
276 static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */
277 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */
278 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */
static int32_t rack_gp_decrease_per = 20;	/* 20% decrease in multiplier */
static int32_t rack_gp_increase_per = 2;	/* 2% increase in multiplier */
281 static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */
282 static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */
283 static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */
284 static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */
static int32_t rack_gp_no_rec_chg = 1;	/* Prohibit recovery from reducing its multiplier */
286 static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? */
287 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */
static int32_t rack_timely_max_push_drop = 3;	/* Three rounds of pushing */
289 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */
290 static int32_t rack_use_max_for_nobackoff = 0;
291 static int32_t rack_timely_int_timely_only = 0; /* do interim timely's only use the timely algo (no b/w changes)? */
292 static int32_t rack_timely_no_stopping = 0;
293 static int32_t rack_down_raise_thresh = 100;
294 static int32_t rack_req_segs = 1;
296 /* Weird delayed ack mode */
297 static int32_t rack_use_imac_dack = 0;
298 /* Rack specific counters */
299 counter_u64_t rack_badfr;
300 counter_u64_t rack_badfr_bytes;
301 counter_u64_t rack_rtm_prr_retran;
302 counter_u64_t rack_rtm_prr_newdata;
303 counter_u64_t rack_timestamp_mismatch;
304 counter_u64_t rack_reorder_seen;
305 counter_u64_t rack_paced_segments;
306 counter_u64_t rack_unpaced_segments;
307 counter_u64_t rack_calc_zero;
308 counter_u64_t rack_calc_nonzero;
309 counter_u64_t rack_saw_enobuf;
310 counter_u64_t rack_saw_enetunreach;
311 counter_u64_t rack_per_timer_hole;
313 /* Tail loss probe counters */
314 counter_u64_t rack_tlp_tot;
315 counter_u64_t rack_tlp_newdata;
316 counter_u64_t rack_tlp_retran;
317 counter_u64_t rack_tlp_retran_bytes;
318 counter_u64_t rack_tlp_retran_fail;
319 counter_u64_t rack_to_tot;
320 counter_u64_t rack_to_arm_rack;
321 counter_u64_t rack_to_arm_tlp;
322 counter_u64_t rack_to_alloc;
323 counter_u64_t rack_to_alloc_hard;
324 counter_u64_t rack_to_alloc_emerg;
325 counter_u64_t rack_to_alloc_limited;
326 counter_u64_t rack_alloc_limited_conns;
327 counter_u64_t rack_split_limited;
329 counter_u64_t rack_sack_proc_all;
330 counter_u64_t rack_sack_proc_short;
331 counter_u64_t rack_sack_proc_restart;
332 counter_u64_t rack_sack_attacks_detected;
333 counter_u64_t rack_sack_attacks_reversed;
334 counter_u64_t rack_sack_used_next_merge;
335 counter_u64_t rack_sack_splits;
336 counter_u64_t rack_sack_used_prev_merge;
337 counter_u64_t rack_sack_skipped_acked;
338 counter_u64_t rack_ack_total;
339 counter_u64_t rack_express_sack;
340 counter_u64_t rack_sack_total;
341 counter_u64_t rack_move_none;
342 counter_u64_t rack_move_some;
344 counter_u64_t rack_used_tlpmethod;
345 counter_u64_t rack_used_tlpmethod2;
346 counter_u64_t rack_enter_tlp_calc;
347 counter_u64_t rack_input_idle_reduces;
348 counter_u64_t rack_collapsed_win;
349 counter_u64_t rack_tlp_does_nada;
350 counter_u64_t rack_try_scwnd;
352 /* Counters for HW TLS */
353 counter_u64_t rack_tls_rwnd;
354 counter_u64_t rack_tls_cwnd;
355 counter_u64_t rack_tls_app;
356 counter_u64_t rack_tls_other;
357 counter_u64_t rack_tls_filled;
358 counter_u64_t rack_tls_rxt;
359 counter_u64_t rack_tls_tlp;
361 /* Temp CPU counters */
362 counter_u64_t rack_find_high;
364 counter_u64_t rack_progress_drops;
365 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
366 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];
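/*
 * All of the counters above are counter(9) per-CPU counters; they are
 * allocated with counter_u64_alloc()/COUNTER_ARRAY_ALLOC() in
 * rack_init_sysctls() below and exported read-only via sysctl.
 */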
static void
rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);
static int
rack_process_ack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to,
    uint32_t tiwin, int32_t tlen, int32_t *ofia, int32_t thflags, int32_t *ret_val);
static int
rack_process_data(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static void
rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
    struct tcphdr *th, uint16_t nsegs, uint16_t type, int32_t recovery);
static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
    uint8_t limit_type);
static struct rack_sendmap *
rack_check_recovery_mode(struct tcpcb *tp,
    uint32_t tsused);
static void
rack_cong_signal(struct tcpcb *tp, struct tcphdr *th,
    uint32_t type);
static void rack_counter_destroy(void);
static int
rack_ctloutput(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp);
static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
static void
rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line);
static void
rack_do_segment(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint8_t iptos);
static void rack_dtor(void *mem, int32_t size, void *arg);
static void
rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm,
    uint32_t t, uint32_t cts);
static void
rack_log_alt_to_to_cancel(struct tcp_rack *rack,
    uint32_t flex1, uint32_t flex2,
    uint32_t flex3, uint32_t flex4,
    uint32_t flex5, uint32_t flex6,
    uint16_t flex7, uint8_t mod);
static void
rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
414 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, struct rack_sendmap *rsm);
415 static struct rack_sendmap *
416 rack_find_high_nonack(struct tcp_rack *rack,
417 struct rack_sendmap *rsm);
418 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
419 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
420 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
static int
rack_get_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
static void
rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
    tcp_seq th_ack, int line);
static uint32_t
rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
429 static int32_t rack_handoff_ok(struct tcpcb *tp);
430 static int32_t rack_init(struct tcpcb *tp);
431 static void rack_init_sysctls(void);
static void
rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th);
static void
rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
    uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts,
    uint8_t pass, struct rack_sendmap *hintrsm, uint32_t us_cts);
static void
rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
441 struct rack_sendmap *rsm);
442 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
443 static int32_t rack_output(struct tcpcb *tp);
static uint32_t
rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
447 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
448 uint32_t cts, int *moved_two);
449 static void rack_post_recovery(struct tcpcb *tp, struct tcphdr *th);
450 static void rack_remxt_tmr(struct tcpcb *tp);
static int
rack_set_sockopt(struct socket *so, struct sockopt *sopt,
453 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
454 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
455 static int32_t rack_stopall(struct tcpcb *tp);
static void
rack_timer_activate(struct tcpcb *tp, uint32_t timer_type,
    uint32_t slot);
459 static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type);
460 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
461 static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type);
static uint32_t
rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
464 struct rack_sendmap *rsm, uint32_t ts, int32_t * lenp);
static void
rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
467 struct rack_sendmap *rsm, uint32_t ts);
static int32_t
rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
470 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
471 static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
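/*
 * One handler per TCP FSM state; rack_do_segment() dispatches to
 * whichever of these matches the current connection state.
 */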
static int
rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_closing(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_established(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_lastack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
508 struct rack_sendmap *
tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t tsused);
511 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
512 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
static void
tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th);
int32_t rack_clear_counter = 0;

static int
sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
{
	uint32_t stat;
	int32_t error;

	error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
	if (error || req->newptr == NULL)
		return (error);

	error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
	if (error)
		return (error);
	if (stat == 1) {
#ifdef INVARIANTS
		printf("Clearing RACK counters\n");
#endif
536 counter_u64_zero(rack_badfr);
537 counter_u64_zero(rack_badfr_bytes);
538 counter_u64_zero(rack_rtm_prr_retran);
539 counter_u64_zero(rack_rtm_prr_newdata);
540 counter_u64_zero(rack_timestamp_mismatch);
541 counter_u64_zero(rack_reorder_seen);
542 counter_u64_zero(rack_tlp_tot);
543 counter_u64_zero(rack_tlp_newdata);
544 counter_u64_zero(rack_tlp_retran);
545 counter_u64_zero(rack_tlp_retran_bytes);
546 counter_u64_zero(rack_tlp_retran_fail);
547 counter_u64_zero(rack_to_tot);
548 counter_u64_zero(rack_to_arm_rack);
549 counter_u64_zero(rack_to_arm_tlp);
550 counter_u64_zero(rack_paced_segments);
551 counter_u64_zero(rack_calc_zero);
552 counter_u64_zero(rack_calc_nonzero);
553 counter_u64_zero(rack_unpaced_segments);
554 counter_u64_zero(rack_saw_enobuf);
555 counter_u64_zero(rack_saw_enetunreach);
556 counter_u64_zero(rack_per_timer_hole);
557 counter_u64_zero(rack_to_alloc_hard);
558 counter_u64_zero(rack_to_alloc_emerg);
559 counter_u64_zero(rack_sack_proc_all);
560 counter_u64_zero(rack_sack_proc_short);
561 counter_u64_zero(rack_sack_proc_restart);
562 counter_u64_zero(rack_to_alloc);
563 counter_u64_zero(rack_to_alloc_limited);
564 counter_u64_zero(rack_alloc_limited_conns);
565 counter_u64_zero(rack_split_limited);
566 counter_u64_zero(rack_find_high);
567 counter_u64_zero(rack_tls_rwnd);
568 counter_u64_zero(rack_tls_cwnd);
569 counter_u64_zero(rack_tls_app);
570 counter_u64_zero(rack_tls_other);
571 counter_u64_zero(rack_tls_filled);
572 counter_u64_zero(rack_tls_rxt);
573 counter_u64_zero(rack_tls_tlp);
574 counter_u64_zero(rack_sack_attacks_detected);
575 counter_u64_zero(rack_sack_attacks_reversed);
576 counter_u64_zero(rack_sack_used_next_merge);
577 counter_u64_zero(rack_sack_used_prev_merge);
578 counter_u64_zero(rack_sack_splits);
579 counter_u64_zero(rack_sack_skipped_acked);
580 counter_u64_zero(rack_ack_total);
581 counter_u64_zero(rack_express_sack);
582 counter_u64_zero(rack_sack_total);
583 counter_u64_zero(rack_move_none);
584 counter_u64_zero(rack_move_some);
585 counter_u64_zero(rack_used_tlpmethod);
586 counter_u64_zero(rack_used_tlpmethod2);
587 counter_u64_zero(rack_enter_tlp_calc);
588 counter_u64_zero(rack_progress_drops);
589 counter_u64_zero(rack_tlp_does_nada);
590 counter_u64_zero(rack_try_scwnd);
		counter_u64_zero(rack_collapsed_win);
	}
	rack_clear_counter = 0;
	return (0);
}
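/*
 * Illustrative usage (assuming this stack registers its sysctl root as
 * net.inet.tcp.rack): writing 1 zeroes every counter above, e.g.
 *
 *	# sysctl net.inet.tcp.rack.clear=1
 */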
static void
rack_init_sysctls(void)
{
603 struct sysctl_oid *rack_counters;
604 struct sysctl_oid *rack_attack;
605 struct sysctl_oid *rack_pacing;
606 struct sysctl_oid *rack_timely;
607 struct sysctl_oid *rack_timers;
608 struct sysctl_oid *rack_tlp;
609 struct sysctl_oid *rack_misc;
610 struct sysctl_oid *rack_measure;
611 struct sysctl_oid *rack_probertt;
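/*
 * Everything below hangs off rack_sysctl_root; each oid declared above
 * becomes its own sub-tree (attack detection, counters, probe-rtt,
 * pacing, timely, timers, TLP, measurement, misc) so related knobs
 * stay grouped.
 */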
613 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
614 SYSCTL_CHILDREN(rack_sysctl_root),
    OID_AUTO,
    "sack_attack",
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
618 "Rack Sack Attack Counters and Controls");
619 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
620 SYSCTL_CHILDREN(rack_sysctl_root),
    OID_AUTO,
    "stats",
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Rack Counters");
625 SYSCTL_ADD_S32(&rack_sysctl_ctx,
626 SYSCTL_CHILDREN(rack_sysctl_root),
627 OID_AUTO, "rate_sample_method", CTLFLAG_RW,
628 &rack_rate_sample_method , USE_RTT_LOW,
629 "What method should we use for rate sampling 0=high, 1=low ");
630 SYSCTL_ADD_S32(&rack_sysctl_ctx,
631 SYSCTL_CHILDREN(rack_sysctl_root),
632 OID_AUTO, "hw_tlsmax", CTLFLAG_RW,
633 &rack_hw_tls_max_seg , 3,
634 "What is the maximum number of full TLS records that will be sent at once");
635 /* Probe rtt related controls */
636 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
637 SYSCTL_CHILDREN(rack_sysctl_root),
    OID_AUTO,
    "probertt",
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
641 "ProbeRTT related Controls");
642 SYSCTL_ADD_U16(&rack_sysctl_ctx,
643 SYSCTL_CHILDREN(rack_probertt),
644 OID_AUTO, "exit_per_hpb", CTLFLAG_RW,
645 &rack_atexit_prtt_hbp, 130,
646 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%");
647 SYSCTL_ADD_U16(&rack_sysctl_ctx,
648 SYSCTL_CHILDREN(rack_probertt),
649 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW,
650 &rack_atexit_prtt, 130,
651 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%");
652 SYSCTL_ADD_U16(&rack_sysctl_ctx,
653 SYSCTL_CHILDREN(rack_probertt),
654 OID_AUTO, "gp_per_mul", CTLFLAG_RW,
655 &rack_per_of_gp_probertt, 60,
656 "What percentage of goodput do we pace at in probertt");
657 SYSCTL_ADD_U16(&rack_sysctl_ctx,
658 SYSCTL_CHILDREN(rack_probertt),
659 OID_AUTO, "gp_per_reduce", CTLFLAG_RW,
660 &rack_per_of_gp_probertt_reduce, 10,
661 "What percentage of goodput do we reduce every gp_srtt");
662 SYSCTL_ADD_U16(&rack_sysctl_ctx,
663 SYSCTL_CHILDREN(rack_probertt),
664 OID_AUTO, "gp_per_low", CTLFLAG_RW,
665 &rack_per_of_gp_lowthresh, 40,
666 "What percentage of goodput do we allow the multiplier to fall to");
667 SYSCTL_ADD_U32(&rack_sysctl_ctx,
668 SYSCTL_CHILDREN(rack_probertt),
669 OID_AUTO, "time_between", CTLFLAG_RW,
670 & rack_time_between_probertt, 96000000,
671 "How many useconds between the lowest rtt falling must past before we enter probertt");
672 SYSCTL_ADD_U32(&rack_sysctl_ctx,
673 SYSCTL_CHILDREN(rack_probertt),
674 OID_AUTO, "safety", CTLFLAG_RW,
675 &rack_probe_rtt_safety_val, 2000000,
676 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)");
677 SYSCTL_ADD_U32(&rack_sysctl_ctx,
678 SYSCTL_CHILDREN(rack_probertt),
679 OID_AUTO, "sets_cwnd", CTLFLAG_RW,
680 &rack_probe_rtt_sets_cwnd, 0,
681 "Do we set the cwnd too (if always_lower is on)");
682 SYSCTL_ADD_U32(&rack_sysctl_ctx,
683 SYSCTL_CHILDREN(rack_probertt),
684 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW,
685 &rack_max_drain_wait, 2,
686 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal");
687 SYSCTL_ADD_U32(&rack_sysctl_ctx,
688 SYSCTL_CHILDREN(rack_probertt),
689 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW,
691 "We must drain this many gp_srtt's waiting for flight to reach goal");
692 SYSCTL_ADD_U32(&rack_sysctl_ctx,
693 SYSCTL_CHILDREN(rack_probertt),
694 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW,
695 &rack_probertt_use_min_rtt_entry, 1,
696 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry");
697 SYSCTL_ADD_U32(&rack_sysctl_ctx,
698 SYSCTL_CHILDREN(rack_probertt),
699 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW,
700 &rack_probertt_use_min_rtt_exit, 0,
701 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt");
702 SYSCTL_ADD_U32(&rack_sysctl_ctx,
703 SYSCTL_CHILDREN(rack_probertt),
704 OID_AUTO, "length_div", CTLFLAG_RW,
705 &rack_probertt_gpsrtt_cnt_div, 0,
706 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)");
707 SYSCTL_ADD_U32(&rack_sysctl_ctx,
708 SYSCTL_CHILDREN(rack_probertt),
709 OID_AUTO, "length_mul", CTLFLAG_RW,
710 &rack_probertt_gpsrtt_cnt_mul, 0,
711 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)");
712 SYSCTL_ADD_U32(&rack_sysctl_ctx,
713 SYSCTL_CHILDREN(rack_probertt),
714 OID_AUTO, "holdtim_at_target", CTLFLAG_RW,
715 &rack_min_probertt_hold, 200000,
716 "What is the minimum time we hold probertt at target");
717 SYSCTL_ADD_U32(&rack_sysctl_ctx,
718 SYSCTL_CHILDREN(rack_probertt),
719 OID_AUTO, "filter_life", CTLFLAG_RW,
720 &rack_probertt_filter_life, 10000000,
721 "What is the time for the filters life in useconds");
722 SYSCTL_ADD_U32(&rack_sysctl_ctx,
723 SYSCTL_CHILDREN(rack_probertt),
724 OID_AUTO, "lower_within", CTLFLAG_RW,
725 &rack_probertt_lower_within, 10,
726 "If the rtt goes lower within this percentage of the time, go into probe-rtt");
727 SYSCTL_ADD_U32(&rack_sysctl_ctx,
728 SYSCTL_CHILDREN(rack_probertt),
729 OID_AUTO, "must_move", CTLFLAG_RW,
730 &rack_min_rtt_movement, 250,
731 "How much is the minimum movement in rtt to count as a drop for probertt purposes");
732 SYSCTL_ADD_U32(&rack_sysctl_ctx,
733 SYSCTL_CHILDREN(rack_probertt),
734 OID_AUTO, "clear_is_cnts", CTLFLAG_RW,
735 &rack_probertt_clear_is, 1,
736 "Do we clear I/S counts on exiting probe-rtt");
737 SYSCTL_ADD_S32(&rack_sysctl_ctx,
738 SYSCTL_CHILDREN(rack_probertt),
739 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW,
740 &rack_max_drain_hbp, 1,
741 "How many extra drain gpsrtt's do we get in highly buffered paths");
742 SYSCTL_ADD_S32(&rack_sysctl_ctx,
743 SYSCTL_CHILDREN(rack_probertt),
744 OID_AUTO, "hbp_threshold", CTLFLAG_RW,
746 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold");
747 /* Pacing related sysctls */
748 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
749 SYSCTL_CHILDREN(rack_sysctl_root),
    OID_AUTO,
    "pacing",
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
753 "Pacing related Controls");
754 SYSCTL_ADD_S32(&rack_sysctl_ctx,
755 SYSCTL_CHILDREN(rack_pacing),
756 OID_AUTO, "max_pace_over", CTLFLAG_RW,
757 &rack_max_per_above, 30,
758 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)");
759 SYSCTL_ADD_S32(&rack_sysctl_ctx,
760 SYSCTL_CHILDREN(rack_pacing),
761 OID_AUTO, "pace_to_one", CTLFLAG_RW,
762 &rack_pace_one_seg, 0,
763 "Do we allow low b/w pacing of 1MSS instead of two");
764 SYSCTL_ADD_S32(&rack_sysctl_ctx,
765 SYSCTL_CHILDREN(rack_pacing),
766 OID_AUTO, "limit_wsrtt", CTLFLAG_RW,
767 &rack_limit_time_with_srtt, 0,
768 "Do we limit pacing time based on srtt");
769 SYSCTL_ADD_S32(&rack_sysctl_ctx,
770 SYSCTL_CHILDREN(rack_pacing),
771 OID_AUTO, "init_win", CTLFLAG_RW,
772 &rack_default_init_window, 0,
773 "Do we have a rack initial window 0 = system default");
774 SYSCTL_ADD_U32(&rack_sysctl_ctx,
775 SYSCTL_CHILDREN(rack_pacing),
776 OID_AUTO, "hw_pacing_adjust", CTLFLAG_RW,
777 &rack_hw_pace_adjust, 0,
778 "What percentage do we raise the MSS by (11 = 1.1%)");
779 SYSCTL_ADD_U16(&rack_sysctl_ctx,
780 SYSCTL_CHILDREN(rack_pacing),
781 OID_AUTO, "gp_per_ss", CTLFLAG_RW,
782 &rack_per_of_gp_ss, 250,
783 "If non zero, what percentage of goodput to pace at in slow start");
784 SYSCTL_ADD_U16(&rack_sysctl_ctx,
785 SYSCTL_CHILDREN(rack_pacing),
786 OID_AUTO, "gp_per_ca", CTLFLAG_RW,
787 &rack_per_of_gp_ca, 150,
788 "If non zero, what percentage of goodput to pace at in congestion avoidance");
789 SYSCTL_ADD_U16(&rack_sysctl_ctx,
790 SYSCTL_CHILDREN(rack_pacing),
791 OID_AUTO, "gp_per_rec", CTLFLAG_RW,
792 &rack_per_of_gp_rec, 200,
793 "If non zero, what percentage of goodput to pace at in recovery");
794 SYSCTL_ADD_S32(&rack_sysctl_ctx,
795 SYSCTL_CHILDREN(rack_pacing),
796 OID_AUTO, "pace_max_seg", CTLFLAG_RW,
797 &rack_hptsi_segments, 40,
798 "What size is the max for TSO segments in pacing and burst mitigation");
799 SYSCTL_ADD_S32(&rack_sysctl_ctx,
800 SYSCTL_CHILDREN(rack_pacing),
801 OID_AUTO, "burst_reduces", CTLFLAG_RW,
802 &rack_slot_reduction, 4,
803 "When doing only burst mitigation what is the reduce divisor");
804 SYSCTL_ADD_S32(&rack_sysctl_ctx,
805 SYSCTL_CHILDREN(rack_sysctl_root),
806 OID_AUTO, "use_pacing", CTLFLAG_RW,
807 &rack_pace_every_seg, 0,
808 "If set we use pacing, if clear we use only the original burst mitigation");
810 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
811 SYSCTL_CHILDREN(rack_sysctl_root),
    OID_AUTO,
    "timely",
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
815 "Rack Timely RTT Controls");
/* Timely based GP dynamics */
817 SYSCTL_ADD_S32(&rack_sysctl_ctx,
818 SYSCTL_CHILDREN(rack_timely),
819 OID_AUTO, "upper", CTLFLAG_RW,
820 &rack_gp_per_bw_mul_up, 2,
821 "Rack timely upper range for equal b/w (in percentage)");
822 SYSCTL_ADD_S32(&rack_sysctl_ctx,
823 SYSCTL_CHILDREN(rack_timely),
824 OID_AUTO, "lower", CTLFLAG_RW,
825 &rack_gp_per_bw_mul_down, 4,
826 "Rack timely lower range for equal b/w (in percentage)");
827 SYSCTL_ADD_S32(&rack_sysctl_ctx,
828 SYSCTL_CHILDREN(rack_timely),
829 OID_AUTO, "rtt_max_mul", CTLFLAG_RW,
830 &rack_gp_rtt_maxmul, 3,
831 "Rack timely multipler of lowest rtt for rtt_max");
832 SYSCTL_ADD_S32(&rack_sysctl_ctx,
833 SYSCTL_CHILDREN(rack_timely),
834 OID_AUTO, "rtt_min_div", CTLFLAG_RW,
835 &rack_gp_rtt_mindiv, 4,
836 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt");
837 SYSCTL_ADD_S32(&rack_sysctl_ctx,
838 SYSCTL_CHILDREN(rack_timely),
839 OID_AUTO, "rtt_min_mul", CTLFLAG_RW,
840 &rack_gp_rtt_minmul, 1,
841 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
842 SYSCTL_ADD_S32(&rack_sysctl_ctx,
843 SYSCTL_CHILDREN(rack_timely),
844 OID_AUTO, "decrease", CTLFLAG_RW,
845 &rack_gp_decrease_per, 20,
846 "Rack timely decrease percentage of our GP multiplication factor");
847 SYSCTL_ADD_S32(&rack_sysctl_ctx,
848 SYSCTL_CHILDREN(rack_timely),
849 OID_AUTO, "increase", CTLFLAG_RW,
850 &rack_gp_increase_per, 2,
851 "Rack timely increase perentage of our GP multiplication factor");
852 SYSCTL_ADD_S32(&rack_sysctl_ctx,
853 SYSCTL_CHILDREN(rack_timely),
854 OID_AUTO, "lowerbound", CTLFLAG_RW,
855 &rack_per_lower_bound, 50,
856 "Rack timely lowest percentage we allow GP multiplier to fall to");
857 SYSCTL_ADD_S32(&rack_sysctl_ctx,
858 SYSCTL_CHILDREN(rack_timely),
859 OID_AUTO, "upperboundss", CTLFLAG_RW,
860 &rack_per_upper_bound_ss, 0,
861 "Rack timely higest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)");
862 SYSCTL_ADD_S32(&rack_sysctl_ctx,
863 SYSCTL_CHILDREN(rack_timely),
864 OID_AUTO, "upperboundca", CTLFLAG_RW,
865 &rack_per_upper_bound_ca, 0,
866 "Rack timely higest percentage we allow GP multiplier to CA raise to (0 is no upperbound)");
867 SYSCTL_ADD_S32(&rack_sysctl_ctx,
868 SYSCTL_CHILDREN(rack_timely),
869 OID_AUTO, "dynamicgp", CTLFLAG_RW,
871 "Rack timely do we enable dynmaic timely goodput by default");
872 SYSCTL_ADD_S32(&rack_sysctl_ctx,
873 SYSCTL_CHILDREN(rack_timely),
874 OID_AUTO, "no_rec_red", CTLFLAG_RW,
875 &rack_gp_no_rec_chg, 1,
876 "Rack timely do we prohibit the recovery multiplier from being lowered");
877 SYSCTL_ADD_S32(&rack_sysctl_ctx,
878 SYSCTL_CHILDREN(rack_timely),
879 OID_AUTO, "red_clear_cnt", CTLFLAG_RW,
880 &rack_timely_dec_clear, 6,
881 "Rack timely what threshold do we count to before another boost during b/w decent");
882 SYSCTL_ADD_S32(&rack_sysctl_ctx,
883 SYSCTL_CHILDREN(rack_timely),
884 OID_AUTO, "max_push_rise", CTLFLAG_RW,
885 &rack_timely_max_push_rise, 3,
886 "Rack timely how many times do we push up with b/w increase");
887 SYSCTL_ADD_S32(&rack_sysctl_ctx,
888 SYSCTL_CHILDREN(rack_timely),
889 OID_AUTO, "max_push_drop", CTLFLAG_RW,
890 &rack_timely_max_push_drop, 3,
891 "Rack timely how many times do we push back on b/w decent");
892 SYSCTL_ADD_S32(&rack_sysctl_ctx,
893 SYSCTL_CHILDREN(rack_timely),
894 OID_AUTO, "min_segs", CTLFLAG_RW,
895 &rack_timely_min_segs, 4,
896 "Rack timely when setting the cwnd what is the min num segments");
897 SYSCTL_ADD_S32(&rack_sysctl_ctx,
898 SYSCTL_CHILDREN(rack_timely),
899 OID_AUTO, "noback_max", CTLFLAG_RW,
900 &rack_use_max_for_nobackoff, 0,
901 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min");
902 SYSCTL_ADD_S32(&rack_sysctl_ctx,
903 SYSCTL_CHILDREN(rack_timely),
904 OID_AUTO, "interim_timely_only", CTLFLAG_RW,
905 &rack_timely_int_timely_only, 0,
906 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)");
907 SYSCTL_ADD_S32(&rack_sysctl_ctx,
908 SYSCTL_CHILDREN(rack_timely),
909 OID_AUTO, "nonstop", CTLFLAG_RW,
910 &rack_timely_no_stopping, 0,
911 "Rack timely don't stop increase");
912 SYSCTL_ADD_S32(&rack_sysctl_ctx,
913 SYSCTL_CHILDREN(rack_timely),
914 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW,
915 &rack_down_raise_thresh, 100,
916 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)");
917 SYSCTL_ADD_S32(&rack_sysctl_ctx,
918 SYSCTL_CHILDREN(rack_timely),
919 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW,
921 "Bottom dragging if not these many segments outstanding and room");
923 /* TLP and Rack related parameters */
924 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
925 SYSCTL_CHILDREN(rack_sysctl_root),
    OID_AUTO,
    "tlp",
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
929 "TLP and Rack related Controls");
930 SYSCTL_ADD_S32(&rack_sysctl_ctx,
931 SYSCTL_CHILDREN(rack_tlp),
932 OID_AUTO, "use_rrr", CTLFLAG_RW,
934 "Do we use Rack Rapid Recovery");
935 SYSCTL_ADD_S32(&rack_sysctl_ctx,
936 SYSCTL_CHILDREN(rack_tlp),
937 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW,
938 &rack_non_rxt_use_cr, 0,
939 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk");
940 SYSCTL_ADD_S32(&rack_sysctl_ctx,
941 SYSCTL_CHILDREN(rack_tlp),
942 OID_AUTO, "tlpmethod", CTLFLAG_RW,
943 &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
944 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
945 SYSCTL_ADD_S32(&rack_sysctl_ctx,
946 SYSCTL_CHILDREN(rack_tlp),
947 OID_AUTO, "limit", CTLFLAG_RW,
949 "How many TLP's can be sent without sending new data");
950 SYSCTL_ADD_S32(&rack_sysctl_ctx,
951 SYSCTL_CHILDREN(rack_tlp),
952 OID_AUTO, "use_greater", CTLFLAG_RW,
953 &rack_tlp_use_greater, 1,
954 "Should we use the rack_rtt time if its greater than srtt");
955 SYSCTL_ADD_S32(&rack_sysctl_ctx,
956 SYSCTL_CHILDREN(rack_tlp),
957 OID_AUTO, "tlpminto", CTLFLAG_RW,
959 "TLP minimum timeout per the specification (10ms)");
960 SYSCTL_ADD_S32(&rack_sysctl_ctx,
961 SYSCTL_CHILDREN(rack_tlp),
962 OID_AUTO, "send_oldest", CTLFLAG_RW,
963 &rack_always_send_oldest, 0,
964 "Should we always send the oldest TLP and RACK-TLP");
965 SYSCTL_ADD_S32(&rack_sysctl_ctx,
966 SYSCTL_CHILDREN(rack_tlp),
967 OID_AUTO, "rack_tlimit", CTLFLAG_RW,
968 &rack_limited_retran, 0,
969 "How many times can a rack timeout drive out sends");
970 SYSCTL_ADD_S32(&rack_sysctl_ctx,
971 SYSCTL_CHILDREN(rack_tlp),
972 OID_AUTO, "tlp_retry", CTLFLAG_RW,
973 &rack_tlp_max_resend, 2,
974 "How many times does TLP retry a single segment or multiple with no ACK");
975 SYSCTL_ADD_S32(&rack_sysctl_ctx,
976 SYSCTL_CHILDREN(rack_tlp),
977 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
978 &rack_lower_cwnd_at_tlp, 0,
979 "When a TLP completes a retran should we enter recovery");
980 SYSCTL_ADD_S32(&rack_sysctl_ctx,
981 SYSCTL_CHILDREN(rack_tlp),
982 OID_AUTO, "reorder_thresh", CTLFLAG_RW,
983 &rack_reorder_thresh, 2,
984 "What factor for rack will be added when seeing reordering (shift right)");
985 SYSCTL_ADD_S32(&rack_sysctl_ctx,
986 SYSCTL_CHILDREN(rack_tlp),
987 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
989 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
990 SYSCTL_ADD_S32(&rack_sysctl_ctx,
991 SYSCTL_CHILDREN(rack_tlp),
992 OID_AUTO, "reorder_fade", CTLFLAG_RW,
993 &rack_reorder_fade, 0,
994 "Does reorder detection fade, if so how many ms (0 means never)");
995 SYSCTL_ADD_S32(&rack_sysctl_ctx,
996 SYSCTL_CHILDREN(rack_tlp),
997 OID_AUTO, "pktdelay", CTLFLAG_RW,
999 "Extra RACK time (in ms) besides reordering thresh");
1001 /* Timer related controls */
1002 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1003 SYSCTL_CHILDREN(rack_sysctl_root),
    OID_AUTO,
    "timers",
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1007 "Timer related controls");
1008 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1009 SYSCTL_CHILDREN(rack_timers),
1010 OID_AUTO, "persmin", CTLFLAG_RW,
1011 &rack_persist_min, 250,
1012 "What is the minimum time in milliseconds between persists");
1013 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1014 SYSCTL_CHILDREN(rack_timers),
1015 OID_AUTO, "persmax", CTLFLAG_RW,
1016 &rack_persist_max, 2000,
1017 "What is the largest delay in milliseconds between persists");
1018 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1019 SYSCTL_CHILDREN(rack_timers),
1020 OID_AUTO, "delayed_ack", CTLFLAG_RW,
1021 &rack_delayed_ack_time, 200,
1022 "Delayed ack time (200ms)");
1023 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1024 SYSCTL_CHILDREN(rack_timers),
1025 OID_AUTO, "minrto", CTLFLAG_RW,
1027 "Minimum RTO in ms -- set with caution below 1000 due to TLP");
1028 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1029 SYSCTL_CHILDREN(rack_timers),
1030 OID_AUTO, "maxrto", CTLFLAG_RW,
1032 "Maxiumum RTO in ms -- should be at least as large as min_rto");
1033 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1034 SYSCTL_CHILDREN(rack_timers),
1035 OID_AUTO, "minto", CTLFLAG_RW,
1037 "Minimum rack timeout in milliseconds");
1038 /* Measure controls */
1039 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1040 SYSCTL_CHILDREN(rack_sysctl_root),
    OID_AUTO,
    "measure",
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1044 "Measure related controls");
1045 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1046 SYSCTL_CHILDREN(rack_measure),
1047 OID_AUTO, "wma_divisor", CTLFLAG_RW,
1048 &rack_wma_divisor, 8,
1049 "When doing b/w calculation what is the divisor for the WMA");
1050 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1051 SYSCTL_CHILDREN(rack_measure),
1052 OID_AUTO, "end_cwnd", CTLFLAG_RW,
1053 &rack_cwnd_block_ends_measure, 0,
1054 "Does a cwnd just-return end the measurement window (app limited)");
1055 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1056 SYSCTL_CHILDREN(rack_measure),
1057 OID_AUTO, "end_rwnd", CTLFLAG_RW,
1058 &rack_rwnd_block_ends_measure, 0,
1059 "Does an rwnd just-return end the measurement window (app limited -- not persists)");
1060 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1061 SYSCTL_CHILDREN(rack_measure),
1062 OID_AUTO, "min_target", CTLFLAG_RW,
1063 &rack_def_data_window, 20,
1064 "What is the minimum target window (in mss) for a GP measurements");
1065 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1066 SYSCTL_CHILDREN(rack_measure),
1067 OID_AUTO, "goal_bdp", CTLFLAG_RW,
1069 "What is the goal BDP to measure");
1070 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1071 SYSCTL_CHILDREN(rack_measure),
1072 OID_AUTO, "min_srtts", CTLFLAG_RW,
1074 "What is the goal BDP to measure");
1075 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1076 SYSCTL_CHILDREN(rack_measure),
1077 OID_AUTO, "min_measure_tim", CTLFLAG_RW,
1078 &rack_min_measure_usec, 0,
1079 "What is the Minimum time time for a measurement if 0, this is off");
1080 /* Misc rack controls */
1081 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1082 SYSCTL_CHILDREN(rack_sysctl_root),
    OID_AUTO,
    "misc",
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1086 "Misc related controls");
1087 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1088 SYSCTL_CHILDREN(rack_misc),
1089 OID_AUTO, "shared_cwnd", CTLFLAG_RW,
1090 &rack_enable_shared_cwnd, 0,
1091 "Should RACK try to use the shared cwnd on connections where allowed");
1092 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1093 SYSCTL_CHILDREN(rack_misc),
1094 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW,
1095 &rack_limits_scwnd, 1,
1096 "Should RACK place low end time limits on the shared cwnd feature");
1097 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1098 SYSCTL_CHILDREN(rack_misc),
1099 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW,
1100 &rack_enable_mqueue_for_nonpaced, 0,
1101 "Should RACK use mbuf queuing for non-paced connections");
1102 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1103 SYSCTL_CHILDREN(rack_misc),
1104 OID_AUTO, "iMac_dack", CTLFLAG_RW,
1105 &rack_use_imac_dack, 0,
1106 "Should RACK try to emulate iMac delayed ack");
1107 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1108 SYSCTL_CHILDREN(rack_misc),
1109 OID_AUTO, "no_prr", CTLFLAG_RW,
1110 &rack_disable_prr, 0,
1111 "Should RACK not use prr and only pace (must have pacing on)");
1112 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1113 SYSCTL_CHILDREN(rack_misc),
1114 OID_AUTO, "bb_verbose", CTLFLAG_RW,
1115 &rack_verbose_logging, 0,
1116 "Should RACK black box logging be verbose");
1117 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1118 SYSCTL_CHILDREN(rack_misc),
1119 OID_AUTO, "data_after_close", CTLFLAG_RW,
1120 &rack_ignore_data_after_close, 1,
1121 "Do we hold off sending a RST until all pending data is ack'd");
1122 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1123 SYSCTL_CHILDREN(rack_misc),
1124 OID_AUTO, "no_sack_needed", CTLFLAG_RW,
1125 &rack_sack_not_required, 0,
1126 "Do we allow rack to run on connections not supporting SACK");
1127 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1128 SYSCTL_CHILDREN(rack_misc),
1129 OID_AUTO, "recovery_loss_prop", CTLFLAG_RW,
1130 &rack_use_proportional_reduce, 0,
1131 "Should we proportionaly reduce cwnd based on the number of losses ");
1132 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1133 SYSCTL_CHILDREN(rack_misc),
1134 OID_AUTO, "recovery_prop", CTLFLAG_RW,
1135 &rack_proportional_rate, 10,
1136 "What percent reduction per loss");
1137 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1138 SYSCTL_CHILDREN(rack_misc),
1139 OID_AUTO, "prr_sendalot", CTLFLAG_RW,
1140 &rack_send_a_lot_in_prr, 1,
1141 "Send a lot in prr");
1142 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1143 SYSCTL_CHILDREN(rack_misc),
1144 OID_AUTO, "earlyrecovery", CTLFLAG_RW,
1145 &rack_early_recovery, 1,
1146 "Do we do early recovery with rack");
1147 /* Sack Attacker detection stuff */
1148 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1149 SYSCTL_CHILDREN(rack_attack),
1150 OID_AUTO, "detect_highsackratio", CTLFLAG_RW,
1151 &rack_highest_sack_thresh_seen, 0,
1152 "Highest sack to ack ratio seen");
1153 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1154 SYSCTL_CHILDREN(rack_attack),
1155 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW,
1156 &rack_highest_move_thresh_seen, 0,
1157 "Highest move to non-move ratio seen");
1158 rack_ack_total = counter_u64_alloc(M_WAITOK);
1159 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1160 SYSCTL_CHILDREN(rack_attack),
1161 OID_AUTO, "acktotal", CTLFLAG_RD,
1163 "Total number of Ack's");
1164 rack_express_sack = counter_u64_alloc(M_WAITOK);
1165 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1166 SYSCTL_CHILDREN(rack_attack),
1167 OID_AUTO, "exp_sacktotal", CTLFLAG_RD,
1169 "Total expresss number of Sack's");
1170 rack_sack_total = counter_u64_alloc(M_WAITOK);
1171 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1172 SYSCTL_CHILDREN(rack_attack),
1173 OID_AUTO, "sacktotal", CTLFLAG_RD,
1175 "Total number of SACKs");
1176 rack_move_none = counter_u64_alloc(M_WAITOK);
1177 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1178 SYSCTL_CHILDREN(rack_attack),
1179 OID_AUTO, "move_none", CTLFLAG_RD,
1181 "Total number of SACK index reuse of postions under threshold");
1182 rack_move_some = counter_u64_alloc(M_WAITOK);
1183 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1184 SYSCTL_CHILDREN(rack_attack),
1185 OID_AUTO, "move_some", CTLFLAG_RD,
1187 "Total number of SACK index reuse of postions over threshold");
1188 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK);
1189 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1190 SYSCTL_CHILDREN(rack_attack),
1191 OID_AUTO, "attacks", CTLFLAG_RD,
1192 &rack_sack_attacks_detected,
1193 "Total number of SACK attackers that had sack disabled");
1194 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK);
1195 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1196 SYSCTL_CHILDREN(rack_attack),
1197 OID_AUTO, "reversed", CTLFLAG_RD,
1198 &rack_sack_attacks_reversed,
1199 "Total number of SACK attackers that were later determined false positive");
1200 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK);
1201 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1202 SYSCTL_CHILDREN(rack_attack),
1203 OID_AUTO, "nextmerge", CTLFLAG_RD,
1204 &rack_sack_used_next_merge,
1205 "Total number of times we used the next merge");
1206 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK);
1207 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1208 SYSCTL_CHILDREN(rack_attack),
1209 OID_AUTO, "prevmerge", CTLFLAG_RD,
1210 &rack_sack_used_prev_merge,
1211 "Total number of times we used the prev merge");
1213 rack_badfr = counter_u64_alloc(M_WAITOK);
1214 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1215 SYSCTL_CHILDREN(rack_counters),
1216 OID_AUTO, "badfr", CTLFLAG_RD,
1217 &rack_badfr, "Total number of bad FRs");
1218 rack_badfr_bytes = counter_u64_alloc(M_WAITOK);
1219 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1220 SYSCTL_CHILDREN(rack_counters),
1221 OID_AUTO, "badfr_bytes", CTLFLAG_RD,
1222 &rack_badfr_bytes, "Total number of bad FRs");
1223 rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK);
1224 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1225 SYSCTL_CHILDREN(rack_counters),
1226 OID_AUTO, "prrsndret", CTLFLAG_RD,
1227 &rack_rtm_prr_retran,
1228 "Total number of prr based retransmits");
1229 rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK);
1230 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1231 SYSCTL_CHILDREN(rack_counters),
1232 OID_AUTO, "prrsndnew", CTLFLAG_RD,
1233 &rack_rtm_prr_newdata,
1234 "Total number of prr based new transmits");
1235 rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK);
1236 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1237 SYSCTL_CHILDREN(rack_counters),
1238 OID_AUTO, "tsnf", CTLFLAG_RD,
1239 &rack_timestamp_mismatch,
1240 "Total number of timestamps that we could not find the reported ts");
1241 rack_find_high = counter_u64_alloc(M_WAITOK);
1242 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1243 SYSCTL_CHILDREN(rack_counters),
1244 OID_AUTO, "findhigh", CTLFLAG_RD,
1246 "Total number of FIN causing find-high");
1247 rack_reorder_seen = counter_u64_alloc(M_WAITOK);
1248 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1249 SYSCTL_CHILDREN(rack_counters),
1250 OID_AUTO, "reordering", CTLFLAG_RD,
1252 "Total number of times we added delay due to reordering");
1253 rack_tlp_tot = counter_u64_alloc(M_WAITOK);
1254 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1255 SYSCTL_CHILDREN(rack_counters),
1256 OID_AUTO, "tlp_to_total", CTLFLAG_RD,
1258 "Total number of tail loss probe expirations");
1259 rack_tlp_newdata = counter_u64_alloc(M_WAITOK);
1260 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1261 SYSCTL_CHILDREN(rack_counters),
1262 OID_AUTO, "tlp_new", CTLFLAG_RD,
1264 "Total number of tail loss probe sending new data");
1265 rack_tlp_retran = counter_u64_alloc(M_WAITOK);
1266 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1267 SYSCTL_CHILDREN(rack_counters),
1268 OID_AUTO, "tlp_retran", CTLFLAG_RD,
1270 "Total number of tail loss probe sending retransmitted data");
1271 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK);
1272 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1273 SYSCTL_CHILDREN(rack_counters),
1274 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD,
1275 &rack_tlp_retran_bytes,
1276 "Total bytes of tail loss probe sending retransmitted data");
1277 rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK);
1278 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1279 SYSCTL_CHILDREN(rack_counters),
1280 OID_AUTO, "tlp_retran_fail", CTLFLAG_RD,
1281 &rack_tlp_retran_fail,
1282 "Total number of tail loss probe sending retransmitted data that failed (wait for t3)");
1283 rack_to_tot = counter_u64_alloc(M_WAITOK);
1284 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1285 SYSCTL_CHILDREN(rack_counters),
1286 OID_AUTO, "rack_to_tot", CTLFLAG_RD,
1288 "Total number of times the rack to expired");
1289 rack_to_arm_rack = counter_u64_alloc(M_WAITOK);
1290 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1291 SYSCTL_CHILDREN(rack_counters),
1292 OID_AUTO, "arm_rack", CTLFLAG_RD,
1294 "Total number of times the rack timer armed");
1295 rack_to_arm_tlp = counter_u64_alloc(M_WAITOK);
1296 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1297 SYSCTL_CHILDREN(rack_counters),
1298 OID_AUTO, "arm_tlp", CTLFLAG_RD,
1300 "Total number of times the tlp timer armed");
1301 rack_calc_zero = counter_u64_alloc(M_WAITOK);
1302 rack_calc_nonzero = counter_u64_alloc(M_WAITOK);
1303 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1304 SYSCTL_CHILDREN(rack_counters),
1305 OID_AUTO, "calc_zero", CTLFLAG_RD,
1307 "Total number of times pacing time worked out to zero");
1308 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1309 SYSCTL_CHILDREN(rack_counters),
1310 OID_AUTO, "calc_nonzero", CTLFLAG_RD,
1312 "Total number of times pacing time worked out to non-zero");
1313 rack_paced_segments = counter_u64_alloc(M_WAITOK);
1314 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1315 SYSCTL_CHILDREN(rack_counters),
1316 OID_AUTO, "paced", CTLFLAG_RD,
1317 &rack_paced_segments,
1318 "Total number of times a segment send caused hptsi");
1319 rack_unpaced_segments = counter_u64_alloc(M_WAITOK);
1320 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1321 SYSCTL_CHILDREN(rack_counters),
1322 OID_AUTO, "unpaced", CTLFLAG_RD,
1323 &rack_unpaced_segments,
1324 "Total number of times a segment did not cause hptsi");
1325 rack_saw_enobuf = counter_u64_alloc(M_WAITOK);
1326 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1327 SYSCTL_CHILDREN(rack_counters),
1328 OID_AUTO, "saw_enobufs", CTLFLAG_RD,
1330 "Total number of times a segment did not cause hptsi");
1331 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK);
1332 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1333 SYSCTL_CHILDREN(rack_counters),
1334 OID_AUTO, "saw_enetunreach", CTLFLAG_RD,
1335 &rack_saw_enetunreach,
1336 "Total number of times a segment did not cause hptsi");
1337 rack_to_alloc = counter_u64_alloc(M_WAITOK);
1338 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1339 SYSCTL_CHILDREN(rack_counters),
1340 OID_AUTO, "allocs", CTLFLAG_RD,
1342 "Total allocations of tracking structures");
1343 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK);
1344 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1345 SYSCTL_CHILDREN(rack_counters),
1346 OID_AUTO, "allochard", CTLFLAG_RD,
1347 &rack_to_alloc_hard,
1348 "Total allocations done with sleeping the hard way");
1349 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK);
1350 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1351 SYSCTL_CHILDREN(rack_counters),
1352 OID_AUTO, "allocemerg", CTLFLAG_RD,
1353 &rack_to_alloc_emerg,
1354 "Total allocations done from emergency cache");
1355 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK);
1356 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1357 SYSCTL_CHILDREN(rack_counters),
1358 OID_AUTO, "alloc_limited", CTLFLAG_RD,
1359 &rack_to_alloc_limited,
1360 "Total allocations dropped due to limit");
1361 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK);
1362 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1363 SYSCTL_CHILDREN(rack_counters),
1364 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD,
1365 &rack_alloc_limited_conns,
1366 "Connections with allocations dropped due to limit");
1367 rack_split_limited = counter_u64_alloc(M_WAITOK);
1368 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1369 SYSCTL_CHILDREN(rack_counters),
1370 OID_AUTO, "split_limited", CTLFLAG_RD,
1371 &rack_split_limited,
1372 "Split allocations dropped due to limit");
1373 rack_sack_proc_all = counter_u64_alloc(M_WAITOK);
1374 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1375 SYSCTL_CHILDREN(rack_counters),
1376 OID_AUTO, "sack_long", CTLFLAG_RD,
1377 &rack_sack_proc_all,
1378 "Total times we had to walk whole list for sack processing");
1379 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK);
1380 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1381 SYSCTL_CHILDREN(rack_counters),
1382 OID_AUTO, "sack_restart", CTLFLAG_RD,
1383 &rack_sack_proc_restart,
1384 "Total times we had to walk whole list due to a restart");
1385 rack_sack_proc_short = counter_u64_alloc(M_WAITOK);
1386 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1387 SYSCTL_CHILDREN(rack_counters),
1388 OID_AUTO, "sack_short", CTLFLAG_RD,
1389 &rack_sack_proc_short,
1390 "Total times we took shortcut for sack processing");
1391 rack_enter_tlp_calc = counter_u64_alloc(M_WAITOK);
1392 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1393 SYSCTL_CHILDREN(rack_counters),
1394 OID_AUTO, "tlp_calc_entered", CTLFLAG_RD,
1395 &rack_enter_tlp_calc,
1396 "Total times we called calc-tlp");
1397 rack_used_tlpmethod = counter_u64_alloc(M_WAITOK);
1398 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1399 SYSCTL_CHILDREN(rack_counters),
1400 OID_AUTO, "hit_tlp_method", CTLFLAG_RD,
1401 &rack_used_tlpmethod,
1402 "Total number of runt sacks");
1403 rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK);
1404 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1405 SYSCTL_CHILDREN(rack_counters),
1406 OID_AUTO, "hit_tlp_method2", CTLFLAG_RD,
1407 &rack_used_tlpmethod2,
1408 "Total number of times we hit TLP method 2");
1409 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK);
1410 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1411 SYSCTL_CHILDREN(rack_attack),
1412 OID_AUTO, "skipacked", CTLFLAG_RD,
1413 &rack_sack_skipped_acked,
1414 "Total number of times we skipped previously sacked");
1415 rack_sack_splits = counter_u64_alloc(M_WAITOK);
1416 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1417 SYSCTL_CHILDREN(rack_attack),
1418 OID_AUTO, "ofsplit", CTLFLAG_RD,
1420 "Total number of times we did the old fashion tree split");
1421 rack_progress_drops = counter_u64_alloc(M_WAITOK);
1422 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1423 SYSCTL_CHILDREN(rack_counters),
1424 OID_AUTO, "prog_drops", CTLFLAG_RD,
1425 &rack_progress_drops,
1426 "Total number of progress drops");
1427 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK);
1428 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1429 SYSCTL_CHILDREN(rack_counters),
1430 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD,
1431 &rack_input_idle_reduces,
1432 "Total number of idle reductions on input");
1433 rack_collapsed_win = counter_u64_alloc(M_WAITOK);
1434 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1435 SYSCTL_CHILDREN(rack_counters),
1436 OID_AUTO, "collapsed_win", CTLFLAG_RD,
1437 &rack_collapsed_win,
1438 "Total number of collapsed windows");
1439 rack_tlp_does_nada = counter_u64_alloc(M_WAITOK);
1440 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1441 SYSCTL_CHILDREN(rack_counters),
1442 OID_AUTO, "tlp_nada", CTLFLAG_RD,
1443 &rack_tlp_does_nada,
1444 "Total number of nada tlp calls");
1445 rack_try_scwnd = counter_u64_alloc(M_WAITOK);
1446 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1447 SYSCTL_CHILDREN(rack_counters),
1448 OID_AUTO, "tried_scwnd", CTLFLAG_RD,
1450 "Total number of scwnd attempts");
1452 rack_tls_rwnd = counter_u64_alloc(M_WAITOK);
1453 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1454 SYSCTL_CHILDREN(rack_counters),
1455 OID_AUTO, "tls_rwnd", CTLFLAG_RD,
1457 "Total hdwr tls rwnd limited");
1458 rack_tls_cwnd = counter_u64_alloc(M_WAITOK);
1459 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1460 SYSCTL_CHILDREN(rack_counters),
1461 OID_AUTO, "tls_cwnd", CTLFLAG_RD,
1463 "Total hdwr tls cwnd limited");
1464 rack_tls_app = counter_u64_alloc(M_WAITOK);
1465 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1466 SYSCTL_CHILDREN(rack_counters),
1467 OID_AUTO, "tls_app", CTLFLAG_RD,
1469 "Total hdwr tls app limited");
1470 rack_tls_other = counter_u64_alloc(M_WAITOK);
1471 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1472 SYSCTL_CHILDREN(rack_counters),
1473 OID_AUTO, "tls_other", CTLFLAG_RD,
1475 "Total hdwr tls other limited");
1476 rack_tls_filled = counter_u64_alloc(M_WAITOK);
1477 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1478 SYSCTL_CHILDREN(rack_counters),
1479 OID_AUTO, "tls_filled", CTLFLAG_RD,
1481 "Total hdwr tls filled");
1482 rack_tls_rxt = counter_u64_alloc(M_WAITOK);
1483 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1484 SYSCTL_CHILDREN(rack_counters),
1485 OID_AUTO, "tls_rxt", CTLFLAG_RD,
1486 &rack_tls_rxt,
1487 "Total hdwr tls rxt");
1488 rack_tls_tlp = counter_u64_alloc(M_WAITOK);
1489 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1490 SYSCTL_CHILDREN(rack_counters),
1491 OID_AUTO, "tls_tlp", CTLFLAG_RD,
1493 "Total hdwr tls tlp");
1494 rack_per_timer_hole = counter_u64_alloc(M_WAITOK);
1495 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1496 SYSCTL_CHILDREN(rack_counters),
1497 OID_AUTO, "timer_hole", CTLFLAG_RD,
1498 &rack_per_timer_hole,
1499 "Total persists start in timer hole");
1500 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK);
1501 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1502 OID_AUTO, "outsize", CTLFLAG_RD,
1503 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes");
1504 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK);
1505 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1506 OID_AUTO, "opts", CTLFLAG_RD,
1507 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats");
1508 SYSCTL_ADD_PROC(&rack_sysctl_ctx,
1509 SYSCTL_CHILDREN(rack_sysctl_root),
1510 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1511 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters");
1512 }
1513 
1514 static int32_t
1515 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a)
1516 {
1517 if (SEQ_GEQ(b->r_start, a->r_start) &&
1518 SEQ_LT(b->r_start, a->r_end)) {
1519 /*
1520 * The entry b is within the
1521 * block a. i.e.:
1522 * a -- |-------------|
1523 * b -- |----|
1524 * <or>
1525 * b -- |------|
1526 * <or>
1527 * b -- |-----------|
1528 */
1529 return (0);
1530 } else if (SEQ_GEQ(b->r_start, a->r_end)) {
1531 /*
1532 * b falls as either the next
1533 * sequence block after a so a
1534 * is said to be smaller than b.
1535 * i.e:
1536 * a -- |------|
1537 * b -- |--------|
1538 * or
1539 * b -- |-----|
1540 */
1541 return (1);
1542 }
1543 /*
1544 * What's left is where a is
1545 * larger than b. i.e:
1546 * a -- |-------|
1547 * b -- |---|
1548 * or even possibly
1549 * b -- |--------------|
1550 */
1551 return (-1);
1552 }
1553 
1554 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);
1555 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);
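/*
 * Note on the comparator above: returning 0 for any block b whose
 * r_start falls inside [a->r_start, a->r_end) means RB_FIND() with a
 * single-sequence key locates the sendmap entry covering that sequence
 * number, not just an exact r_start match.
 */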
1556 
1557 static uint32_t
1558 rc_init_window(struct tcp_rack *rack)
1559 {
1560 uint32_t win;
1561 
1562 if (rack->rc_init_win == 0) {
1563 /*
1564 * Nothing set by the user, use the system stack
1565 * default.
1566 */
1567 return(tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)));
1568 }
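/*
 * Illustration (assumed values): with rc_init_win = 20 and a 1448
 * byte fixed maxseg, the user-configured path below yields
 * 20 * 1448 = 28960 bytes, versus the system default above, which
 * is typically 10 segments under RFC 6928 style rules.
 */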
1569 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win;
1570 return(win);
1571 }
1572 
1573 static uint64_t
1574 rack_get_fixed_pacing_bw(struct tcp_rack *rack)
1575 {
1576 if (IN_RECOVERY(rack->rc_tp->t_flags))
1577 return (rack->r_ctl.rc_fixed_pacing_rate_rec);
1578 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
1579 return (rack->r_ctl.rc_fixed_pacing_rate_ss);
1580 else
1581 return (rack->r_ctl.rc_fixed_pacing_rate_ca);
1582 }
1583 
1584 static uint64_t
1585 rack_get_bw(struct tcp_rack *rack)
1586 {
1587 if (rack->use_fixed_rate) {
1588 /* Return the fixed pacing rate */
1589 return (rack_get_fixed_pacing_bw(rack));
1590 }
1591 if (rack->r_ctl.gp_bw == 0) {
1592 /*
1593 * We don't yet have a b/w measurement,
1594 * if we have a user set initial bw
1595 * return it. If we don't have that and
1596 * we have an srtt, use the tcp IW (10) to
1597 * calculate a fictional b/w over the SRTT
1598 * which is more or less a guess. Note
1599 * we don't use our IW from rack on purpose
1600 * so if we have like IW=30, we are not
1601 * calculating a "huge" b/w.
1602 */
1603 uint64_t bw, srtt;
1604 if (rack->r_ctl.init_rate)
1605 return (rack->r_ctl.init_rate);
1606 
1607 /* Has the user set a max peak rate? */
1608 #ifdef NETFLIX_PEAKRATE
1609 if (rack->rc_tp->t_maxpeakrate)
1610 return (rack->rc_tp->t_maxpeakrate);
1611 #endif
1612 /* Ok lets come up with the IW guess, if we have a srtt */
1613 if (rack->rc_tp->t_srtt == 0) {
1614 /*
1615 * Go with old pacing method
1616 * i.e. burst mitigation only.
1617 */
1618 return (0);
1619 }
1620 /* Ok lets get the initial TCP win (not racks) */
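/*
 * Rough worked example (assumed values): an initial window of
 * 10 * 1448 = 14480 bytes and a 100000 usec SRTT give
 * 14480 * 1000000 / 100000 = 144800 bytes/sec as the fictional
 * starting b/w.
 */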
1621 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp));
1622 srtt = ((uint64_t)TICKS_2_USEC(rack->rc_tp->t_srtt) >> TCP_RTT_SHIFT);
1623 bw *= (uint64_t)USECS_IN_SECOND;
1624 bw /= srtt;
1625 return (bw);
1626 } else {
1627 uint64_t bw;
1628 
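/*
 * Illustration (assumed values): until RACK_REQ_AVG samples are in,
 * gp_bw appears to hold a running sum, so gp_bw = 3000000 over
 * num_avg = 2 samples reads back as a 1500000 bytes/sec estimate in
 * the else branch below.
 */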
1629 if(rack->r_ctl.num_avg >= RACK_REQ_AVG) {
1630 /* Averaging is done, we can return the value */
1631 bw = rack->r_ctl.gp_bw;
1632 } else {
1633 /* Still doing initial average must calculate */
1634 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_avg;
1635 }
1636 #ifdef NETFLIX_PEAKRATE
1637 if ((rack->rc_tp->t_maxpeakrate) &&
1638 (bw > rack->rc_tp->t_maxpeakrate)) {
1639 /* The user has set a peak rate to pace at
1640 * don't allow us to pace faster than that.
1641 */
1642 return (rack->rc_tp->t_maxpeakrate);
1643 }
1644 #endif
1645 return (bw);
1646 }
1647 }
1648 
1649 static uint16_t
1650 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm)
1651 {
1652 if (rack->use_fixed_rate) {
1653 return (100);
1654 } else if (rack->in_probe_rtt && (rsm == NULL))
1655 return(rack->r_ctl.rack_per_of_gp_probertt);
1656 else if ((IN_RECOVERY(rack->rc_tp->t_flags) &&
1657 rack->r_ctl.rack_per_of_gp_rec)) {
1658 if (rsm) {
1659 /* a retransmission always use the recovery rate */
1660 return(rack->r_ctl.rack_per_of_gp_rec);
1661 } else if (rack->rack_rec_nonrxt_use_cr) {
1662 /* Directed to use the configured rate */
1663 goto configured_rate;
1664 } else if (rack->rack_no_prr &&
1665 (rack->r_ctl.rack_per_of_gp_rec > 100)) {
1666 /* No PRR, lets just use the b/w estimate only */
1667 return (100);
1668 } else {
1669 /*
1670 * Here we may have a non-retransmit but we
1671 * have no overrides, so just use the recovery
1672 * rate (prr is in effect).
1673 */
1674 return(rack->r_ctl.rack_per_of_gp_rec);
1675 }
1676 }
1677 configured_rate:
1678 /* For the configured rate we look at our cwnd vs the ssthresh */
1679 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
1680 return (rack->r_ctl.rack_per_of_gp_ss);
1681 else
1682 return(rack->r_ctl.rack_per_of_gp_ca);
1683 }
1684 
1685 static uint64_t
1686 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm)
1687 {
1688 /*
1689 * We allow rack_per_of_gp_xx to dictate our bw rate we want.
1690 */
1691 uint64_t bw_est, gain;
1692 
1694 gain = (uint64_t)rack_get_output_gain(rack, rsm);
1695 bw_est = bw * gain;
1696 bw_est /= (uint64_t)100;
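/*
 * E.g. (assumed values): bw = 125000 bytes/sec with a 250 percent
 * gain paces at 125000 * 250 / 100 = 312500 bytes/sec before the
 * RACK_MIN_BW floor below is applied.
 */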
1697 /* Never fall below the minimum (def 64kbps) */
1698 if (bw_est < RACK_MIN_BW)
1699 bw_est = RACK_MIN_BW;
1700 return (bw_est);
1701 }
1702 
1703 static void
1704 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod)
1705 {
1706 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1707 union tcp_log_stackspecific log;
1708 struct timeval tv;
1709 
1710 if ((mod != 1) && (rack_verbose_logging == 0)) {
1711 /*
1712 * We get 3 values currently for mod
1713 * 1 - We are retransmitting and this tells the reason.
1714 * 2 - We are clearing a dup-ack count.
1715 * 3 - We are incrementing a dup-ack count.
1716 *
1717 * The clear/increment are only logged
1718 * if you have BBverbose on.
1719 */
1720 return;
1721 }
1722 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1723 log.u_bbr.flex1 = tsused;
1724 log.u_bbr.flex2 = thresh;
1725 log.u_bbr.flex3 = rsm->r_flags;
1726 log.u_bbr.flex4 = rsm->r_dupack;
1727 log.u_bbr.flex5 = rsm->r_start;
1728 log.u_bbr.flex6 = rsm->r_end;
1729 log.u_bbr.flex8 = mod;
1730 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1731 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1732 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1733 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1734 TCP_LOG_EVENTP(rack->rc_tp, NULL,
1735 &rack->rc_inp->inp_socket->so_rcv,
1736 &rack->rc_inp->inp_socket->so_snd,
1737 BBR_LOG_SETTINGS_CHG, 0,
1738 0, &log, false, &tv);
1739 }
1740 }
1741 
1744 static void
1745 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
1746 {
1747 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1748 union tcp_log_stackspecific log;
1749 struct timeval tv;
1750 
1751 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1752 log.u_bbr.flex1 = TICKS_2_MSEC(rack->rc_tp->t_srtt >> TCP_RTT_SHIFT);
1753 log.u_bbr.flex2 = to * 1000;
1754 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
1755 log.u_bbr.flex4 = slot;
1756 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot;
1757 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
1758 log.u_bbr.flex7 = rack->rc_in_persist;
1759 log.u_bbr.flex8 = which;
1760 if (rack->rack_no_prr)
1761 log.u_bbr.pkts_out = 0;
1762 else
1763 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
1764 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1765 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1766 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1767 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1768 TCP_LOG_EVENTP(rack->rc_tp, NULL,
1769 &rack->rc_inp->inp_socket->so_rcv,
1770 &rack->rc_inp->inp_socket->so_snd,
1771 BBR_LOG_TIMERSTAR, 0,
1772 0, &log, false, &tv);
1773 }
1774 }
1775 
1776 static void
1777 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm)
1778 {
1779 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1780 union tcp_log_stackspecific log;
1781 struct timeval tv;
1782 
1783 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1784 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1785 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1786 log.u_bbr.flex8 = to_num;
1787 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt;
1788 log.u_bbr.flex2 = rack->rc_rack_rtt;
1789 if (rsm == NULL)
1790 log.u_bbr.flex3 = 0;
1791 else
1792 log.u_bbr.flex3 = rsm->r_end - rsm->r_start;
1793 if (rack->rack_no_prr)
1794 log.u_bbr.flex5 = 0;
1795 else
1796 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
1797 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1798 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1799 TCP_LOG_EVENTP(rack->rc_tp, NULL,
1800 &rack->rc_inp->inp_socket->so_rcv,
1801 &rack->rc_inp->inp_socket->so_snd,
1802 BBR_LOG_RTO, 0,
1803 0, &log, false, &tv);
1804 }
1805 }
1806 
1807 static void
1808 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len,
1809 struct rack_sendmap *rsm, int conf)
1810 {
1811 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
1812 union tcp_log_stackspecific log;
1813 struct timeval tv;
1814 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1815 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1816 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1817 log.u_bbr.flex1 = t;
1818 log.u_bbr.flex2 = len;
1819 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt * HPTS_USEC_IN_MSEC;
1820 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest * HPTS_USEC_IN_MSEC;
1821 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest * HPTS_USEC_IN_MSEC;
1822 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_rtt_cnt;
1823 log.u_bbr.flex7 = conf;
1824 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot * (uint64_t)HPTS_USEC_IN_MSEC;
1825 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method;
1826 if (rack->rack_no_prr)
1827 log.u_bbr.pkts_out = 0;
1828 else
1829 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
1830 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1831 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtt;
1832 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags;
1833 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1834 if (rsm) {
1835 log.u_bbr.pkt_epoch = rsm->r_start;
1836 log.u_bbr.lost = rsm->r_end;
1837 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt;
1838 } else {
1839 /* It's a SYN */
1841 log.u_bbr.pkt_epoch = rack->rc_tp->iss;
1842 log.u_bbr.lost = 0;
1843 log.u_bbr.cwnd_gain = 0;
1844 }
1845 /* Write out general bits of interest rrs here */
1846 log.u_bbr.use_lt_bw = rack->rc_highly_buffered;
1847 log.u_bbr.use_lt_bw <<= 1;
1848 log.u_bbr.use_lt_bw |= rack->forced_ack;
1849 log.u_bbr.use_lt_bw <<= 1;
1850 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul;
1851 log.u_bbr.use_lt_bw <<= 1;
1852 log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
1853 log.u_bbr.use_lt_bw <<= 1;
1854 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
1855 log.u_bbr.use_lt_bw <<= 1;
1856 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
1857 log.u_bbr.use_lt_bw <<= 1;
1858 log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
1859 log.u_bbr.use_lt_bw <<= 1;
1860 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom;
1861 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight;
1862 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts;
1863 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered;
1864 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts;
1865 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt;
1866 TCP_LOG_EVENTP(tp, NULL,
1867 &rack->rc_inp->inp_socket->so_rcv,
1868 &rack->rc_inp->inp_socket->so_snd,
1869 BBR_LOG_BBRRTT, 0,
1870 0, &log, false, &tv);
1871 }
1872 }
1873 
1874 static void
1875 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt)
1876 {
1877 /*
1878 * Log the rtt sample we are
1879 * applying to the srtt algorithm in
1880 * useconds.
1881 */
1882 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1883 union tcp_log_stackspecific log;
1884 struct timeval tv;
1885 
1886 /* Convert our ms to a microsecond */
1887 memset(&log, 0, sizeof(log));
1888 log.u_bbr.flex1 = rtt * 1000;
1889 log.u_bbr.flex2 = rack->r_ctl.ack_count;
1890 log.u_bbr.flex3 = rack->r_ctl.sack_count;
1891 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
1892 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra;
1893 log.u_bbr.flex8 = rack->sack_attack_disable;
1894 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1895 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1896 TCP_LOG_EVENTP(rack->rc_tp, NULL,
1897 &rack->rc_inp->inp_socket->so_rcv,
1898 &rack->rc_inp->inp_socket->so_snd,
1899 TCP_LOG_RTT, 0,
1900 0, &log, false, &tv);
1901 }
1902 }
1903 
1905 static void
1906 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line)
1907 {
1908 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
1909 union tcp_log_stackspecific log;
1910 struct timeval tv;
1911 
1912 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1913 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1914 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1915 log.u_bbr.flex1 = line;
1916 log.u_bbr.flex2 = tick;
1917 log.u_bbr.flex3 = tp->t_maxunacktime;
1918 log.u_bbr.flex4 = tp->t_acktime;
1919 log.u_bbr.flex8 = event;
1920 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1921 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1922 TCP_LOG_EVENTP(tp, NULL,
1923 &rack->rc_inp->inp_socket->so_rcv,
1924 &rack->rc_inp->inp_socket->so_snd,
1925 BBR_LOG_PROGRESS, 0,
1926 0, &log, false, &tv);
1927 }
1928 }
1929 
1930 static void
1931 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv)
1932 {
1933 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1934 union tcp_log_stackspecific log;
1936 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1937 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1938 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1939 log.u_bbr.flex1 = slot;
1940 if (rack->rack_no_prr)
1941 log.u_bbr.flex2 = 0;
1942 else
1943 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt;
1944 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags);
1945 log.u_bbr.flex8 = rack->rc_in_persist;
1946 log.u_bbr.timeStamp = cts;
1947 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1948 TCP_LOG_EVENTP(rack->rc_tp, NULL,
1949 &rack->rc_inp->inp_socket->so_rcv,
1950 &rack->rc_inp->inp_socket->so_snd,
1951 BBR_LOG_BBRSND, 0,
1952 0, &log, false, tv);
1953 }
1954 }
1955 
1956 static void
1957 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out)
1958 {
1959 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1960 union tcp_log_stackspecific log;
1961 struct timeval tv;
1962 
1963 memset(&log, 0, sizeof(log));
1964 log.u_bbr.flex1 = did_out;
1965 log.u_bbr.flex2 = nxt_pkt;
1966 log.u_bbr.flex3 = way_out;
1967 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
1968 if (rack->rack_no_prr)
1969 log.u_bbr.flex5 = 0;
1970 else
1971 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
1972 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs;
1973 log.u_bbr.flex7 = rack->r_wanted_output;
1974 log.u_bbr.flex8 = rack->rc_in_persist;
1975 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1976 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1977 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1978 TCP_LOG_EVENTP(rack->rc_tp, NULL,
1979 &rack->rc_inp->inp_socket->so_rcv,
1980 &rack->rc_inp->inp_socket->so_snd,
1981 BBR_LOG_DOSEG_DONE, 0,
1982 0, &log, false, &tv);
1983 }
1984 }
1985 
1986 static void
1987 rack_log_type_hrdwtso(struct tcpcb *tp, struct tcp_rack *rack, int len, int mod, int32_t orig_len, int frm)
1988 {
1989 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
1990 union tcp_log_stackspecific log;
1991 struct timeval tv;
1992 uint32_t cts;
1993 
1994 memset(&log, 0, sizeof(log));
1995 cts = tcp_get_usecs(&tv);
1996 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs;
1997 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
1998 log.u_bbr.flex4 = len;
1999 log.u_bbr.flex5 = orig_len;
2000 log.u_bbr.flex6 = rack->r_ctl.rc_sacked;
2001 log.u_bbr.flex7 = mod;
2002 log.u_bbr.flex8 = frm;
2003 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2004 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2005 TCP_LOG_EVENTP(tp, NULL,
2006 &tp->t_inpcb->inp_socket->so_rcv,
2007 &tp->t_inpcb->inp_socket->so_snd,
2009 0, &log, false, &tv);
2010 }
2011 }
2012 
2013 static void
2014 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot,
2015 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use)
2016 {
2017 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2018 union tcp_log_stackspecific log;
2019 struct timeval tv;
2020 
2021 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2022 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2023 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2024 log.u_bbr.flex1 = slot;
2025 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
2026 log.u_bbr.flex4 = reason;
2027 if (rack->rack_no_prr)
2028 log.u_bbr.flex5 = 0;
2029 else
2030 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2031 log.u_bbr.flex7 = hpts_calling;
2032 log.u_bbr.flex8 = rack->rc_in_persist;
2033 log.u_bbr.lt_epoch = cwnd_to_use;
2034 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2035 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2036 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2037 &rack->rc_inp->inp_socket->so_rcv,
2038 &rack->rc_inp->inp_socket->so_snd,
2039 BBR_LOG_JUSTRET, 0,
2040 tlen, &log, false, &tv);
2041 }
2042 }
2043 
2044 static void
2045 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts,
2046 struct timeval *tv, uint32_t flags_on_entry)
2047 {
2048 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2049 union tcp_log_stackspecific log;
2051 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2052 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2053 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2054 log.u_bbr.flex1 = line;
2055 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to;
2056 log.u_bbr.flex3 = flags_on_entry;
2057 log.u_bbr.flex4 = us_cts;
2058 if (rack->rack_no_prr)
2059 log.u_bbr.flex5 = 0;
2060 else
2061 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2062 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2063 log.u_bbr.flex7 = hpts_removed;
2064 log.u_bbr.flex8 = 1;
2065 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags;
2066 log.u_bbr.timeStamp = us_cts;
2067 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2068 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2069 &rack->rc_inp->inp_socket->so_rcv,
2070 &rack->rc_inp->inp_socket->so_snd,
2071 BBR_LOG_TIMERCANC, 0,
2072 0, &log, false, tv);
2073 }
2074 }
2075 
2076 static void
2077 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
2078 uint32_t flex1, uint32_t flex2,
2079 uint32_t flex3, uint32_t flex4,
2080 uint32_t flex5, uint32_t flex6,
2081 uint16_t flex7, uint8_t mod)
2082 {
2083 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2084 union tcp_log_stackspecific log;
2085 struct timeval tv;
2086 
2088 /* No you can't use 1, its for the real to cancel */
2089 if (mod == 1)
2090 return;
2091 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2092 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2093 log.u_bbr.flex1 = flex1;
2094 log.u_bbr.flex2 = flex2;
2095 log.u_bbr.flex3 = flex3;
2096 log.u_bbr.flex4 = flex4;
2097 log.u_bbr.flex5 = flex5;
2098 log.u_bbr.flex6 = flex6;
2099 log.u_bbr.flex7 = flex7;
2100 log.u_bbr.flex8 = mod;
2101 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2102 &rack->rc_inp->inp_socket->so_rcv,
2103 &rack->rc_inp->inp_socket->so_snd,
2104 BBR_LOG_TIMERCANC, 0,
2105 0, &log, false, &tv);
2106 }
2107 }
2108 
2109 static void
2110 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers)
2111 {
2112 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2113 union tcp_log_stackspecific log;
2114 struct timeval tv;
2115 
2116 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2117 log.u_bbr.flex1 = timers;
2118 log.u_bbr.flex2 = ret;
2119 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp;
2120 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
2121 log.u_bbr.flex5 = cts;
2122 if (rack->rack_no_prr)
2123 log.u_bbr.flex6 = 0;
2124 else
2125 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt;
2126 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2127 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2128 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2129 &rack->rc_inp->inp_socket->so_rcv,
2130 &rack->rc_inp->inp_socket->so_snd,
2131 BBR_LOG_TO_PROCESS, 0,
2132 0, &log, false, &tv);
2133 }
2134 }
2135 
2136 static void
2137 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd)
2138 {
2139 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2140 union tcp_log_stackspecific log;
2141 struct timeval tv;
2142 
2143 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2144 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out;
2145 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs;
2146 if (rack->rack_no_prr)
2147 log.u_bbr.flex3 = 0;
2148 else
2149 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt;
2150 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered;
2151 log.u_bbr.flex5 = rack->r_ctl.rc_sacked;
2152 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt;
2153 log.u_bbr.flex8 = frm;
2154 log.u_bbr.pkts_out = orig_cwnd;
2155 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2156 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2157 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2158 &rack->rc_inp->inp_socket->so_rcv,
2159 &rack->rc_inp->inp_socket->so_snd,
2160 BBR_LOG_BBRUPD, 0,
2161 0, &log, false, &tv);
2162 }
2163 }
2164 
2165 #ifdef NETFLIX_EXP_DETECTION
2166 static void
2167 rack_log_sad(struct tcp_rack *rack, int event)
2168 {
2169 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2170 union tcp_log_stackspecific log;
2171 struct timeval tv;
2172 
2173 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2174 log.u_bbr.flex1 = rack->r_ctl.sack_count;
2175 log.u_bbr.flex2 = rack->r_ctl.ack_count;
2176 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra;
2177 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
2178 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced;
2179 log.u_bbr.flex6 = tcp_sack_to_ack_thresh;
2180 log.u_bbr.pkts_out = tcp_sack_to_move_thresh;
2181 log.u_bbr.lt_epoch = (tcp_force_detection << 8);
2182 log.u_bbr.lt_epoch |= rack->do_detection;
2183 log.u_bbr.applimited = tcp_map_minimum;
2184 log.u_bbr.flex7 = rack->sack_attack_disable;
2185 log.u_bbr.flex8 = event;
2186 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2187 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2188 log.u_bbr.delivered = tcp_sad_decay_val;
2189 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2190 &rack->rc_inp->inp_socket->so_rcv,
2191 &rack->rc_inp->inp_socket->so_snd,
2192 TCP_SAD_DETECTION, 0,
2193 0, &log, false, &tv);
2194 }
2195 }
2196 #endif
2197 
2198 static void
2199 rack_counter_destroy(void)
2200 {
2201 counter_u64_free(rack_ack_total);
2202 counter_u64_free(rack_express_sack);
2203 counter_u64_free(rack_sack_total);
2204 counter_u64_free(rack_move_none);
2205 counter_u64_free(rack_move_some);
2206 counter_u64_free(rack_sack_attacks_detected);
2207 counter_u64_free(rack_sack_attacks_reversed);
2208 counter_u64_free(rack_sack_used_next_merge);
2209 counter_u64_free(rack_sack_used_prev_merge);
2210 counter_u64_free(rack_badfr);
2211 counter_u64_free(rack_badfr_bytes);
2212 counter_u64_free(rack_rtm_prr_retran);
2213 counter_u64_free(rack_rtm_prr_newdata);
2214 counter_u64_free(rack_timestamp_mismatch);
2215 counter_u64_free(rack_find_high);
2216 counter_u64_free(rack_reorder_seen);
2217 counter_u64_free(rack_tlp_tot);
2218 counter_u64_free(rack_tlp_newdata);
2219 counter_u64_free(rack_tlp_retran);
2220 counter_u64_free(rack_tlp_retran_bytes);
2221 counter_u64_free(rack_tlp_retran_fail);
2222 counter_u64_free(rack_to_tot);
2223 counter_u64_free(rack_to_arm_rack);
2224 counter_u64_free(rack_to_arm_tlp);
2225 counter_u64_free(rack_calc_zero);
2226 counter_u64_free(rack_calc_nonzero);
2227 counter_u64_free(rack_paced_segments);
2228 counter_u64_free(rack_unpaced_segments);
2229 counter_u64_free(rack_saw_enobuf);
2230 counter_u64_free(rack_saw_enetunreach);
2231 counter_u64_free(rack_to_alloc);
2232 counter_u64_free(rack_to_alloc_hard);
2233 counter_u64_free(rack_to_alloc_emerg);
2234 counter_u64_free(rack_to_alloc_limited);
2235 counter_u64_free(rack_alloc_limited_conns);
2236 counter_u64_free(rack_split_limited);
2237 counter_u64_free(rack_sack_proc_all);
2238 counter_u64_free(rack_sack_proc_restart);
2239 counter_u64_free(rack_sack_proc_short);
2240 counter_u64_free(rack_enter_tlp_calc);
2241 counter_u64_free(rack_used_tlpmethod);
2242 counter_u64_free(rack_used_tlpmethod2);
2243 counter_u64_free(rack_sack_skipped_acked);
2244 counter_u64_free(rack_sack_splits);
2245 counter_u64_free(rack_progress_drops);
2246 counter_u64_free(rack_input_idle_reduces);
2247 counter_u64_free(rack_collapsed_win);
2248 counter_u64_free(rack_tlp_does_nada);
2249 counter_u64_free(rack_try_scwnd);
2250 counter_u64_free(rack_tls_rwnd);
2251 counter_u64_free(rack_tls_cwnd);
2252 counter_u64_free(rack_tls_app);
2253 counter_u64_free(rack_tls_other);
2254 counter_u64_free(rack_tls_filled);
2255 counter_u64_free(rack_tls_rxt);
2256 counter_u64_free(rack_tls_tlp);
2257 counter_u64_free(rack_per_timer_hole);
2258 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE);
2259 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE);
2260 }
2261 
2262 static struct rack_sendmap *
2263 rack_alloc(struct tcp_rack *rack)
2264 {
2265 struct rack_sendmap *rsm;
2267 rsm = uma_zalloc(rack_zone, M_NOWAIT);
2268 if (rsm) {
2269 rack->r_ctl.rc_num_maps_alloced++;
2270 counter_u64_add(rack_to_alloc, 1);
2271 return (rsm);
2272 }
2273 if (rack->rc_free_cnt) {
2274 counter_u64_add(rack_to_alloc_emerg, 1);
2275 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
2276 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
2277 rack->rc_free_cnt--;
2278 return (rsm);
2279 }
2280 return (NULL);
2281 }
2282 
2283 static struct rack_sendmap *
2284 rack_alloc_full_limit(struct tcp_rack *rack)
2285 {
2286 if ((V_tcp_map_entries_limit > 0) &&
2287 (rack->do_detection == 0) &&
2288 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
2289 counter_u64_add(rack_to_alloc_limited, 1);
2290 if (!rack->alloc_limit_reported) {
2291 rack->alloc_limit_reported = 1;
2292 counter_u64_add(rack_alloc_limited_conns, 1);
2293 }
2294 return (NULL);
2295 }
2296 return (rack_alloc(rack));
2297 }
2298 
2299 /* wrapper to allocate a sendmap entry, subject to a specific limit */
2300 static struct rack_sendmap *
2301 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type)
2302 {
2303 struct rack_sendmap *rsm;
2304 
2305 if (limit_type) {
2306 /* currently there is only one limit type */
2307 if (V_tcp_map_split_limit > 0 &&
2308 (rack->do_detection == 0) &&
2309 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) {
2310 counter_u64_add(rack_split_limited, 1);
2311 if (!rack->alloc_limit_reported) {
2312 rack->alloc_limit_reported = 1;
2313 counter_u64_add(rack_alloc_limited_conns, 1);
2314 }
2315 return (NULL);
2316 }
2317 }
2318 
2319 /* allocate and mark in the limit type, if set */
2320 rsm = rack_alloc(rack);
2321 if (rsm != NULL && limit_type) {
2322 rsm->r_limit_type = limit_type;
2323 rack->r_ctl.rc_num_split_allocs++;
2324 }
2325 return (rsm);
2326 }
2327 
2328 static void
2329 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
2330 {
2331 if (rsm->r_flags & RACK_APP_LIMITED) {
2332 if (rack->r_ctl.rc_app_limited_cnt > 0) {
2333 rack->r_ctl.rc_app_limited_cnt--;
2334 }
2335 }
2336 if (rsm->r_limit_type) {
2337 /* currently there is only one limit type */
2338 rack->r_ctl.rc_num_split_allocs--;
2339 }
2340 if (rsm == rack->r_ctl.rc_first_appl) {
2341 if (rack->r_ctl.rc_app_limited_cnt == 0)
2342 rack->r_ctl.rc_first_appl = NULL;
2343 else {
2344 /* Follow the next one out */
2345 struct rack_sendmap fe;
2347 fe.r_start = rsm->r_nseq_appl;
2348 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
2349 }
2350 }
2351 if (rsm == rack->r_ctl.rc_resend)
2352 rack->r_ctl.rc_resend = NULL;
2353 if (rsm == rack->r_ctl.rc_rsm_at_retran)
2354 rack->r_ctl.rc_rsm_at_retran = NULL;
2355 if (rsm == rack->r_ctl.rc_end_appl)
2356 rack->r_ctl.rc_end_appl = NULL;
2357 if (rack->r_ctl.rc_tlpsend == rsm)
2358 rack->r_ctl.rc_tlpsend = NULL;
2359 if (rack->r_ctl.rc_sacklast == rsm)
2360 rack->r_ctl.rc_sacklast = NULL;
2361 if (rack->rc_free_cnt < rack_free_cache) {
2362 memset(rsm, 0, sizeof(struct rack_sendmap));
2363 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext);
2364 rsm->r_limit_type = 0;
2365 rack->rc_free_cnt++;
2366 return;
2367 }
2368 rack->r_ctl.rc_num_maps_alloced--;
2369 uma_zfree(rack_zone, rsm);
2370 }
2371 
2372 static uint32_t
2373 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack)
2374 {
2375 uint64_t srtt, bw, len, tim;
2376 uint32_t segsiz, def_len, minl;
2378 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
2379 def_len = rack_def_data_window * segsiz;
2380 if (rack->rc_gp_filled == 0) {
2381 /*
2382 * We have no measurement (IW is in flight?) so
2383 * we can only guess using our data_window sysctl
2384 * value (usually 100MSS).
2385 */
2386 return (def_len);
2387 }
2388 /*
2389 * Now we have a number of factors to consider.
2390 *
2391 * 1) We have a desired BDP which is usually
2392 * at least 2.
2393 * 2) We have a minimum number of rtt's usually 1 SRTT
2394 * but we allow it to be more.
2395 * 3) We want to make sure a measurement last N useconds (if
2396 * we have set rack_min_measure_usec.
2397 *
2398 * We handle the first concern here by trying to create a data
2399 * window of max(rack_def_data_window, DesiredBDP). The
2400 * second concern we handle in not letting the measurement
2401 * window end normally until at least the required SRTT's
2402 * have gone by which is done further below in
2403 * rack_enough_for_measurement(). Finally the third concern
2404 * we also handle here by calculating how long that time
2405 * would take at the current BW and then return the
2406 * max of our first calculation and that length. Note
2407 * that if rack_min_measure_usec is 0, we don't deal
2408 * with concern 3. Also for both Concern 1 and 3 an
2409 * application limited period could end the measurement
2410 * anytime.
2411 *
2412 * So lets calculate the BDP with the "known" b/w using
2413 * the SRTT as our rtt and then multiply it by the
2414 * goal.
2415 */
2416 bw = rack_get_bw(rack);
2417 srtt = ((uint64_t)TICKS_2_USEC(tp->t_srtt) >> TCP_RTT_SHIFT);
2418 len = bw * srtt;
2419 len /= (uint64_t)HPTS_USEC_IN_SEC;
2420 len *= max(1, rack_goal_bdp);
2421 /* Now we need to round up to the nearest MSS */
2422 len = roundup(len, segsiz);
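/*
 * Worked example (assumed values): bw = 1250000 bytes/sec and
 * srtt = 40000 usec give a BDP of 50000 bytes; with rack_goal_bdp = 2
 * that is 100000 bytes, which rounds up to 101360 (70 segments of
 * 1448 bytes).
 */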
2423 if (rack_min_measure_usec) {
2424 /* Now calculate our min length for this b/w */
2425 tim = rack_min_measure_usec;
2426 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC;
2427 if (minl == 0)
2428 minl = 1;
2429 minl = roundup(minl, segsiz);
2430 if (len < minl)
2431 len = minl;
2432 }
2433 /*
2434 * Now if we have a very small window we want
2435 * to attempt to get the window that is
2436 * as small as possible. This happens on
2437 * low b/w connections and we don't want to
2438 * span huge numbers of rtt's between measurements.
2439 *
2440 * We basically include 2 over our "MIN window" so
2441 * that the measurement can be shortened (possibly) by
2442 * an ack'ed packet.
2443 */
2444 if (len < def_len)
2445 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz)));
2446 else
2447 return (max((uint32_t)len, def_len));
2448 }
2449 
2451 static int
2452 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack)
2453 {
2454 uint32_t tim, srtts, segsiz;
2455 
2456 /*
2457 * Has enough time passed for the GP measurement to be valid?
2458 */
2459 if ((tp->snd_max == tp->snd_una) ||
2460 (th_ack == tp->snd_max)){
2461 /* All is acked */
2462 return (1);
2463 }
2464 if (SEQ_LT(th_ack, tp->gput_seq)) {
2465 /* Not enough bytes yet */
2466 return (0);
2467 }
2468 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
2469 if (SEQ_LT(th_ack, tp->gput_ack) &&
2470 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
2471 /* Not enough bytes yet */
2472 return (0);
2473 }
2474 if (rack->r_ctl.rc_first_appl &&
2475 (rack->r_ctl.rc_first_appl->r_start == th_ack)) {
2476 /*
2477 * We are up to the app limited point
2478 * we have to measure irrespective of the time..
2479 */
2480 return (1);
2481 }
2482 /* Now what about time? */
2483 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts);
2484 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts;
2485 if (tim >= srtts) {
2486 return (1);
2487 }
2488 /* Nope not even a full SRTT has passed */
2489 return (0);
2490 }
2491 
2493 static void
2494 rack_log_timely(struct tcp_rack *rack,
2495 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd,
2496 uint64_t up_bnd, int line, uint8_t method)
2497 {
2498 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2499 union tcp_log_stackspecific log;
2500 struct timeval tv;
2501 
2502 memset(&log, 0, sizeof(log));
2503 log.u_bbr.flex1 = logged;
2504 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt;
2505 log.u_bbr.flex2 <<= 4;
2506 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt;
2507 log.u_bbr.flex2 <<= 4;
2508 log.u_bbr.flex2 |= rack->rc_gp_incr;
2509 log.u_bbr.flex2 <<= 4;
2510 log.u_bbr.flex2 |= rack->rc_gp_bwred;
2511 log.u_bbr.flex3 = rack->rc_gp_incr;
2512 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
2513 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca;
2514 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec;
2515 log.u_bbr.flex7 = rack->rc_gp_bwred;
2516 log.u_bbr.flex8 = method;
2517 log.u_bbr.cur_del_rate = cur_bw;
2518 log.u_bbr.delRate = low_bnd;
2519 log.u_bbr.bw_inuse = up_bnd;
2520 log.u_bbr.rttProp = rack_get_bw(rack);
2521 log.u_bbr.pkt_epoch = line;
2522 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
2523 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2524 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2525 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
2526 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
2527 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom;
2528 log.u_bbr.cwnd_gain <<= 1;
2529 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec;
2530 log.u_bbr.cwnd_gain <<= 1;
2531 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
2532 log.u_bbr.cwnd_gain <<= 1;
2533 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
2534 log.u_bbr.lost = rack->r_ctl.rc_loss_count;
2535 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2536 &rack->rc_inp->inp_socket->so_rcv,
2537 &rack->rc_inp->inp_socket->so_snd,
2539 0, &log, false, &tv);
2540 }
2541 }
2542 
2543 static int
2544 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult)
2545 {
2546 /*
2547 * Before we increase we need to know if
2548 * the estimate just made was less than
2549 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est)
2550 *
2551 * If we already are pacing at a fast enough
2552 * rate to push us faster there is no sense of
2553 * increasing.
2554 *
2555 * We first calculate our actual pacing rate (ss or ca multiplier
2556 * times our cur_bw).
2557 *
2558 * Then we take the last measured rate and multiply by our
2559 * maximum pacing overage to give us a max allowable rate.
2560 *
2561 * If our act_rate is smaller than our max_allowable rate
2562 * then we should increase. Else we should hold steady.
2563 *
2564 */
2565 uint64_t act_rate, max_allow_rate;
2566 
2567 if (rack_timely_no_stopping)
2568 return (1);
2569 
2570 if ((cur_bw == 0) || (last_bw_est == 0)) {
2571 /*
2572 * Initial startup case or
2573 * everything is acked case.
2574 */
2575 rack_log_timely(rack, mult, cur_bw, 0, 0,
2576 __LINE__, 9);
2577 return (1);
2578 }
2579 if (mult <= 100) {
2580 /*
2581 * We can always pace at or slightly above our rate.
2582 */
2583 rack_log_timely(rack, mult, cur_bw, 0, 0,
2584 __LINE__, 9);
2585 return (1);
2586 }
2587 act_rate = cur_bw * (uint64_t)mult;
2588 act_rate /= 100;
2589 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100);
2590 max_allow_rate /= 100;
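/*
 * E.g. (assumed values): mult = 275 on cur_bw = 100000 bytes/sec
 * gives act_rate = 275000; with last_bw_est = 260000 and
 * rack_max_per_above = 10, max_allow_rate = 286000, so the raise is
 * still permitted below.
 */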
2591 if (act_rate < max_allow_rate) {
2592 /*
2593 * Here the rate we are actually pacing at
2594 * is smaller than 10% above our last measurement.
2595 * This means we are pacing below what we would
2596 * like to try to achieve (plus some wiggle room).
2597 */
2598 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
2599 __LINE__, 9);
2600 return (1);
2601 }
2602 /*
2603 * Here we are already pacing at least rack_max_per_above(10%)
2604 * what we are getting back. This indicates most likely
2605 * that we are being limited (cwnd/rwnd/app) and can't
2606 * get any more b/w. There is no sense of trying to
2607 * raise up the pacing rate, it's not speeding us up
2608 * and we already are pacing faster than we are getting.
2609 */
2610 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
2611 __LINE__, 8);
2612 return (0);
2613 }
2614 
2616 static void
2617 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack)
2618 {
2619 /*
2620 * When we drag bottom, we want to assure
2621 * that no multiplier is below 1.0, if so
2622 * we want to restore it to at least that.
2623 */
2624 if (rack->r_ctl.rack_per_of_gp_rec < 100) {
2625 /* This is unlikely we usually do not touch recovery */
2626 rack->r_ctl.rack_per_of_gp_rec = 100;
2627 }
2628 if (rack->r_ctl.rack_per_of_gp_ca < 100) {
2629 rack->r_ctl.rack_per_of_gp_ca = 100;
2630 }
2631 if (rack->r_ctl.rack_per_of_gp_ss < 100) {
2632 rack->r_ctl.rack_per_of_gp_ss = 100;
2633 }
2634 }
2635 
2636 static void
2637 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack)
2638 {
2639 if (rack->r_ctl.rack_per_of_gp_ca > 100) {
2640 rack->r_ctl.rack_per_of_gp_ca = 100;
2641 }
2642 if (rack->r_ctl.rack_per_of_gp_ss > 100) {
2643 rack->r_ctl.rack_per_of_gp_ss = 100;
2644 }
2645 }
2646 
2647 static void
2648 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override)
2649 {
2650 int32_t calc, logged, plus;
2655 /*
2656 * override is passed when we are
2657 * losing b/w and making one last
2658 * gasp at trying to not lose out
2659 * to a new-reno flow.
2660 */
2663 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */
2664 if (rack->rc_gp_incr &&
2665 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) {
2666 /*
2667 * Reset and get 5 strokes more before the boost. Note
2668 * that the count is 0 based so we have to add one.
2669 */
2671 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST;
2672 rack->rc_gp_timely_inc_cnt = 0;
2673 } else
2674 plus = (uint32_t)rack_gp_increase_per;
2675 /* Must be at least 1% increase for true timely increases */
2676 if ((plus < 1) &&
2677 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0)))
2678 plus = 1;
2679 if (rack->rc_gp_saw_rec &&
2680 (rack->rc_gp_no_rec_chg == 0) &&
2681 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
2682 rack->r_ctl.rack_per_of_gp_rec)) {
2683 /* We have been in recovery ding it too */
2684 calc = rack->r_ctl.rack_per_of_gp_rec + plus;
2685 if (calc > 0xffff)
2686 calc = 0xffff;
2688 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc;
2689 if (rack_per_upper_bound_ss &&
2690 (rack->rc_dragged_bottom == 0) &&
2691 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss))
2692 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss;
2693 }
2694 if (rack->rc_gp_saw_ca &&
2695 (rack->rc_gp_saw_ss == 0) &&
2696 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
2697 rack->r_ctl.rack_per_of_gp_ca)) {
2699 calc = rack->r_ctl.rack_per_of_gp_ca + plus;
2700 if (calc > 0xffff)
2701 calc = 0xffff;
2703 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc;
2704 if (rack_per_upper_bound_ca &&
2705 (rack->rc_dragged_bottom == 0) &&
2706 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca))
2707 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca;
2708 }
2709 if (rack->rc_gp_saw_ss &&
2710 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
2711 rack->r_ctl.rack_per_of_gp_ss)) {
2713 calc = rack->r_ctl.rack_per_of_gp_ss + plus;
2714 if (calc > 0xffff)
2715 calc = 0xffff;
2716 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc;
2717 if (rack_per_upper_bound_ss &&
2718 (rack->rc_dragged_bottom == 0) &&
2719 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss))
2720 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss;
2722 
2723 if (logged &&
2724 (rack->rc_gp_incr == 0)){
2725 /* Go into increment mode */
2726 rack->rc_gp_incr = 1;
2727 rack->rc_gp_timely_inc_cnt = 0;
2728 }
2729 if (rack->rc_gp_incr &&
2730 logged &&
2731 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) {
2732 rack->rc_gp_timely_inc_cnt++;
2733 }
2734 rack_log_timely(rack, logged, plus, 0, 0,
2735 __LINE__, 1);
2736 }
2737 
2738 static uint32_t
2739 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff)
2740 {
2741 /*
2742 * norm_grad = rtt_diff / minrtt;
2743 * new_per = curper * (1 - B * norm_grad)
2744 *
2745 * B = rack_gp_decrease_per (default 10%)
2746 * rtt_diff = input var current rtt-diff
2747 * curper = input var current percentage
2748 * minrtt = from rack filter
2749 */
2750 uint64_t perf;
2751 
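/*
 * Worked example (assumed values): curper = 200, rtt_diff = 5000 usec
 * against a 50000 usec minrtt and B = 10 gives norm_grad = 0.1, so
 * new_per = 200 * (1 - 0.10 * 0.1) = 198.
 */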
2753 perf = (((uint64_t)curper * ((uint64_t)1000000 -
2754 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 *
2755 (((uint64_t)rtt_diff * (uint64_t)1000000)/
2756 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/
2757 (uint64_t)1000000)) /
2758 (uint64_t)1000000);
2759 if (perf > curper) {
2761 perf = curper;
2762 }
2763 return ((uint32_t)perf);
2764 }
2765 
2766 static uint32_t
2767 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt)
2768 {
2769 /*
2771 * result = curper * (1 - (B * (1 - highrttthresh/rtt)))
2773 *
2774 * B = rack_gp_decrease_per (default 10%)
2775 * highrttthresh = filter_min * rack_gp_rtt_maxmul
2776 */
2777 uint64_t perf;
2778 uint32_t highrttthresh;
2780 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
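/*
 * Worked example (assumed values): curper = 150, B = 10,
 * highrttthresh = 40000 usec and rtt = 80000 usec reduce by
 * 10 * (1 - 40000/80000) = 5 percent, so perf = 142 (150 * 0.95,
 * truncated).
 */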
2782 perf = (((uint64_t)curper * ((uint64_t)1000000 -
2783 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 -
2784 ((uint64_t)highrttthresh * (uint64_t)1000000) /
2785 (uint64_t)rtt)) / 100)) /(uint64_t)1000000);
2786 return (perf);
2787 }
2788 
2790 static void
2791 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff)
2792 {
2793 uint64_t logvar, logvar2, logvar3;
2794 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val;
2796 if (rack->rc_gp_incr) {
2797 /* Turn off increment counting */
2798 rack->rc_gp_incr = 0;
2799 rack->rc_gp_timely_inc_cnt = 0;
2800 }
2801 ss_red = ca_red = rec_red = 0;
2803 /* Calculate the reduction value */
2807 /* Must be at least 1% reduction */
2808 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) {
2809 /* We have been in recovery ding it too */
2810 if (timely_says == 2) {
2811 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt);
2812 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
2813 if (alt < new_per)
2814 val = alt;
2815 else
2816 val = new_per;
2817 } else
2818 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
2819 if (rack->r_ctl.rack_per_of_gp_rec > val) {
2820 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val);
2821 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val;
2822 } else {
2823 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
2824 rec_red = 0;
2825 }
2826 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec)
2827 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
2829 }
2830 if (rack->rc_gp_saw_ss) {
2832 if (timely_says == 2) {
2833 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt);
2834 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
2835 if (alt < new_per)
2836 val = alt;
2837 else
2838 val = new_per;
2839 } else
2840 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
2841 if (rack->r_ctl.rack_per_of_gp_ss > new_per) {
2842 ss_red = rack->r_ctl.rack_per_of_gp_ss - val;
2843 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val;
2844 } else {
2845 ss_red = 0;
2846 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
2847 logvar = new_per;
2848 logvar <<= 32;
2849 logvar |= alt;
2850 logvar2 = (uint32_t)rtt;
2851 logvar2 <<= 32;
2852 logvar2 |= (uint32_t)rtt_diff;
2853 logvar3 = rack_gp_rtt_maxmul;
2854 logvar3 <<= 32;
2855 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
2856 rack_log_timely(rack, timely_says,
2858 logvar, __LINE__, 10);
2859 }
2860 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss)
2861 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
2862 }
2863 } else if (rack->rc_gp_saw_ca) {
2865 if (timely_says == 2) {
2866 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt);
2867 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
2868 if (alt < new_per)
2869 val = alt;
2870 else
2871 val = new_per;
2872 } else
2873 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
2874 if (rack->r_ctl.rack_per_of_gp_ca > val) {
2875 ca_red = rack->r_ctl.rack_per_of_gp_ca - val;
2876 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val;
2877 } else {
2878 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
2879 ca_red = 0;
2880 logvar = new_per;
2881 logvar <<= 32;
2882 logvar |= alt;
2883 logvar2 = (uint32_t)rtt;
2884 logvar2 <<= 32;
2885 logvar2 |= (uint32_t)rtt_diff;
2886 logvar3 = rack_gp_rtt_maxmul;
2887 logvar3 <<= 32;
2888 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
2889 rack_log_timely(rack, timely_says,
2891 logvar, __LINE__, 10);
2892 }
2893 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca)
2894 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
2896 }
2897 if (rack->rc_gp_timely_dec_cnt < 0x7) {
2898 rack->rc_gp_timely_dec_cnt++;
2899 if (rack_timely_dec_clear &&
2900 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear))
2901 rack->rc_gp_timely_dec_cnt = 0;
2902 }
2903 logvar = ss_red;
2904 logvar <<= 32;
2905 logvar |= ca_red;
2906 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar,
2907 __LINE__, 2);
2908 }
2909 
2910 static void
2911 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts,
2912 uint32_t rtt, uint32_t line, uint8_t reas)
2913 {
2914 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2915 union tcp_log_stackspecific log;
2916 struct timeval tv;
2917 
2918 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2919 log.u_bbr.flex1 = line;
2920 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts;
2921 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts;
2922 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
2923 log.u_bbr.flex5 = rtt;
2924 log.u_bbr.flex6 = rack->rc_highly_buffered;
2925 log.u_bbr.flex6 <<= 1;
2926 log.u_bbr.flex6 |= rack->forced_ack;
2927 log.u_bbr.flex6 <<= 1;
2928 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul;
2929 log.u_bbr.flex6 <<= 1;
2930 log.u_bbr.flex6 |= rack->in_probe_rtt;
2931 log.u_bbr.flex6 <<= 1;
2932 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt;
2933 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt;
2934 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca;
2935 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec;
2936 log.u_bbr.flex8 = reas;
2937 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2938 log.u_bbr.delRate = rack_get_bw(rack);
2939 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt;
2940 log.u_bbr.cur_del_rate <<= 32;
2941 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt;
2942 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered;
2943 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
2944 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2945 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
2946 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
2947 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts;
2948 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight;
2949 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
2950 log.u_bbr.rttProp = us_cts;
2951 log.u_bbr.rttProp <<= 32;
2952 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt;
2953 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2954 &rack->rc_inp->inp_socket->so_rcv,
2955 &rack->rc_inp->inp_socket->so_snd,
2956 BBR_LOG_RTT_SHRINKS, 0,
2957 0, &log, false, &rack->r_ctl.act_rcv_time);
2958 }
2959 }
2960 
2961 static void
2962 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt)
2963 {
2964 uint64_t bwdp;
2965 
2966 bwdp = rack_get_bw(rack);
2967 bwdp *= (uint64_t)rtt;
2968 bwdp /= (uint64_t)HPTS_USEC_IN_SEC;
2969 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz);
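/*
 * E.g. (assumed values): bw = 500000 bytes/sec and rtt = 30000 usec
 * give bwdp = 15000 bytes, rounded up to 15928 (eleven 1448 byte
 * segments) as the probe_rtt flight target.
 */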
2970 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) {
2971 /*
2972 * A window protocol must be able to have 4 packets
2973 * outstanding as the floor in order to function
2974 * (especially considering delayed ack :D).
2975 */
2976 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs);
2977 }
2978 }
2979 
2980 static void
2981 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts)
2982 {
2983 /*
2984 * ProbeRTT is a bit different in rack_pacing than in
2985 * BBR. It is like BBR in that it uses the lowering of
2986 * the RTT as a signal that we saw something new and
2987 * counts from there for how long between. But it is
2988 * different in that it's quite simple. It does not
2989 * play with the cwnd and wait until we get down
2990 * to N segments outstanding and hold that for
2991 * 200ms. Instead it just sets the pacing reduction
2992 * rate to a set percentage (70 by default) and hold
2993 * that for a number of recent GP Srtt's.
2994 */
2995 uint32_t segsiz;
2996 
2997 if (rack->rc_gp_dyn_mul == 0)
2998 return;
2999 
3000 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) {
3001 /* We are idle */
3002 return;
3003 }
3004 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
3005 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
3006 /*
3007 * Stop the goodput now, the idea here is
3008 * that future measurements with in_probe_rtt
3009 * won't register if they are not greater so
3010 * we want to get what info (if any) is available
3012 */
3013 rack_do_goodput_measurement(rack->rc_tp, rack,
3014 rack->rc_tp->snd_una, __LINE__);
3015 }
3016 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
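/*
 * As the comment above notes, probe-rtt here only reduces the pacing
 * percentage (rack_per_of_gp_probertt, 70 by default) rather than
 * draining cwnd to a fixed packet count as BBR does.
 */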
3017 rack->r_ctl.rc_time_probertt_entered = us_cts;
3018 segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
3019 rack->r_ctl.rc_pace_min_segs);
3020 rack->in_probe_rtt = 1;
3021 rack->measure_saw_probe_rtt = 1;
3022 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
3023 rack->r_ctl.rc_time_probertt_starts = 0;
3024 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt;
3025 if (rack_probertt_use_min_rtt_entry)
3026 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
3027 else
3028 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt);
3029 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3030 __LINE__, RACK_RTTS_ENTERPROBE);
3031 }
3032 
3033 static void
3034 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts)
3035 {
3036 struct rack_sendmap *rsm;
3037 uint32_t segsiz;
3038 
3039 segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
3040 rack->r_ctl.rc_pace_min_segs);
3041 rack->in_probe_rtt = 0;
3042 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
3043 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
3044 /*
3045 * Stop the goodput now, the idea here is
3046 * that future measurements with in_probe_rtt
3047 * won't register if they are not greater so
3048 * we want to get what info (if any) is available
3050 */
3051 rack_do_goodput_measurement(rack->rc_tp, rack,
3052 rack->rc_tp->snd_una, __LINE__);
3053 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
3054 /*
3055 * We don't have enough data to make a measurement.
3056 * So lets just stop and start here after exiting
3057 * probe-rtt. We probably are not interested in
3058 * the results anyway.
3059 */
3060 rack->rc_tp->t_flags &= ~TF_GPUTINPROG;
3061 }
3062 /*
3063 * Measurements through the current snd_max are going
3064 * to be limited by the slower pacing rate.
3065 *
3066 * We need to mark these as app-limited so we
3067 * don't collapse the b/w.
3068 */
3069 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
3070 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
3071 if (rack->r_ctl.rc_app_limited_cnt == 0)
3072 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
3073 else {
3074 /*
3075 * Go out to the end app limited and mark
3076 * this new one as next and move the end_appl up
3078 */
3079 if (rack->r_ctl.rc_end_appl)
3080 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
3081 rack->r_ctl.rc_end_appl = rsm;
3082 }
3083 rsm->r_flags |= RACK_APP_LIMITED;
3084 rack->r_ctl.rc_app_limited_cnt++;
3085 }
3086 /*
3087 * Now, we need to examine our pacing rate multipliers.
3088 * If its under 100%, we need to kick it back up to
3089 * 100%. We also don't let it be over our "max" above
3090 * the actual rate i.e. 100% + rack_clamp_atexit_prtt.
3091 * Note setting clamp_atexit_prtt to 0 has the effect
3092 * of setting CA/SS to 100% always at exit (which is
3093 * the default behavior).
3094 */
3095 if (rack_probertt_clear_is) {
3096 rack->rc_gp_incr = 0;
3097 rack->rc_gp_bwred = 0;
3098 rack->rc_gp_timely_inc_cnt = 0;
3099 rack->rc_gp_timely_dec_cnt = 0;
3100 }
3101 /* Do we do any clamping at exit? */
3102 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) {
3103 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp;
3104 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp;
3105 }
3106 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) {
3107 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt;
3108 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt;
3109 }
3110 /*
3111 * Lets set rtt_diff to 0, so that we will get a "boost"
3113 */
3114 rack->r_ctl.rc_rtt_diff = 0;
3115 
3116 /* Clear all flags so we start fresh */
3117 rack->rc_tp->t_bytes_acked = 0;
3118 rack->rc_tp->ccv->flags &= ~CCF_ABC_SENTAWND;
3119 /*
3120 * If configured to, set the cwnd and ssthresh to
3122 */
3123 if (rack_probe_rtt_sets_cwnd) {
3124 uint64_t ebdp;
3125 uint32_t setto;
3126 
3127 /* Set ssthresh so we get into CA once we hit our target */
3128 if (rack_probertt_use_min_rtt_exit == 1) {
3129 /* Set to min rtt */
3130 rack_set_prtt_target(rack, segsiz,
3131 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
3132 } else if (rack_probertt_use_min_rtt_exit == 2) {
3133 /* Set to current gp rtt */
3134 rack_set_prtt_target(rack, segsiz,
3135 rack->r_ctl.rc_gp_srtt);
3136 } else if (rack_probertt_use_min_rtt_exit == 3) {
3137 /* Set to entry gp rtt */
3138 rack_set_prtt_target(rack, segsiz,
3139 rack->r_ctl.rc_entry_gp_rtt);
3140 } else {
3141 uint64_t sum;
3142 uint32_t setval;
3143 
3144 sum = rack->r_ctl.rc_entry_gp_rtt;
3145 sum *= 10;
3146 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt));
3147 if (sum >= 20) {
3148 /*
3149 * A highly buffered path needs
3150 * cwnd space for timely to work.
3151 * Lets set things up as if
3152 * we are heading back here again.
3153 */
3154 setval = rack->r_ctl.rc_entry_gp_rtt;
3155 } else if (sum >= 15) {
3156 /*
3157 * Lets take the smaller of the
3158 * two since we are just somewhat
3160 */
3161 setval = rack->r_ctl.rc_gp_srtt;
3162 if (setval > rack->r_ctl.rc_entry_gp_rtt)
3163 setval = rack->r_ctl.rc_entry_gp_rtt;
3164 } else {
3165 /*
3166 * Here we are not highly buffered
3167 * and should pick the min we can to
3168 * keep from causing loss.
3169 */
3170 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3171 }
3172 rack_set_prtt_target(rack, segsiz,
3173 setval);
3174 }
3175 if (rack_probe_rtt_sets_cwnd > 1) {
3176 /* There is a percentage here to boost */
3177 ebdp = rack->r_ctl.rc_target_probertt_flight;
3178 ebdp *= rack_probe_rtt_sets_cwnd;
3179 ebdp /= 100;
3180 setto = rack->r_ctl.rc_target_probertt_flight + ebdp;
3181 } else
3182 setto = rack->r_ctl.rc_target_probertt_flight;
3183 rack->rc_tp->snd_cwnd = roundup(setto, segsiz);
3184 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) {
3186 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs;
3187 }
3188 /* If we set in the cwnd also set the ssthresh point so we are in CA */
3189 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1);
3190 }
3191 rack_log_rtt_shrinks(rack, us_cts,
3192 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3193 __LINE__, RACK_RTTS_EXITPROBE);
3194 /* Clear times last so log has all the info */
3195 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max;
3196 rack->r_ctl.rc_time_probertt_entered = us_cts;
3197 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
3198 rack->r_ctl.rc_time_of_last_probertt = us_cts;
3199 }
3200 
3201 static void
3202 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts)
3203 {
3204 /* Check in on probe-rtt */
3205 if (rack->rc_gp_filled == 0) {
3206 /* We do not do p-rtt unless we have gp measurements */
3207 return;
3208 }
3209 if (rack->in_probe_rtt) {
3210 uint64_t no_overflow;
3211 uint32_t endtime, must_stay;
3213 if (rack->r_ctl.rc_went_idle_time &&
3214 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) {
3215 /*
3216 * We went idle during prtt, just exit now.
3217 */
3218 rack_exit_probertt(rack, us_cts);
3219 } else if (rack_probe_rtt_safety_val &&
3220 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) &&
3221 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) {
3222 /*
3223 * Probe RTT safety value triggered!
3224 */
3225 rack_log_rtt_shrinks(rack, us_cts,
3226 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3227 __LINE__, RACK_RTTS_SAFETY);
3228 rack_exit_probertt(rack, us_cts);
3229 } else {
3230 /* Calculate the max we will wait */
3231 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait);
3232 if (rack->rc_highly_buffered)
3233 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp);
3234 /* Calculate the min we must wait */
3235 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain);
3236 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) &&
3237 TSTMP_LT(us_cts, endtime)) {
3238 uint32_t calc;
3239 /* Do we lower more? */
3240 no_exit:
3241 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered))
3242 calc = us_cts - rack->r_ctl.rc_time_probertt_entered;
3243 else
3244 calc = 0;
3245 calc /= max(rack->r_ctl.rc_gp_srtt, 1);
3248 calc *= rack_per_of_gp_probertt_reduce;
3249 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc;
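/*
 * E.g. (assumed values): two full gp_srtt's spent in probe-rtt with
 * rack_per_of_gp_probertt_reduce = 10 lowers the multiplier by 20,
 * say from 70 to 50 percent, bounded below by
 * rack_per_of_gp_lowthresh just after this.
 */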
3251 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh)
3252 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh;
3253 } else {
3254 /* We must reach target or the time set */
3257 if (rack->r_ctl.rc_time_probertt_starts == 0) {
3258 if ((TSTMP_LT(us_cts, must_stay) &&
3259 rack->rc_highly_buffered) ||
3260 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) >
3261 rack->r_ctl.rc_target_probertt_flight)) {
3262 /* We are not past the must_stay time */
3263 goto no_exit;
3264 }
3265 rack_log_rtt_shrinks(rack, us_cts,
3266 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3267 __LINE__, RACK_RTTS_REACHTARGET);
3268 rack->r_ctl.rc_time_probertt_starts = us_cts;
3269 if (rack->r_ctl.rc_time_probertt_starts == 0)
3270 rack->r_ctl.rc_time_probertt_starts = 1;
3271 /* Restore back to our rate we want to pace at in prtt */
3272 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
3275 * Set up our end time: some number of gp_srtts plus 200ms.
3277 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt *
3278 (uint64_t)rack_probertt_gpsrtt_cnt_mul);
3279 if (rack_probertt_gpsrtt_cnt_div)
3280 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div);
3283 endtime += rack_min_probertt_hold;
3284 endtime += rack->r_ctl.rc_time_probertt_starts;
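/*
 * Illustrative timing (example values, not the sysctl defaults):
 * with a gp_srtt of 40ms, cnt_mul of 3 and cnt_div of 2, we would
 * hold the reduced rate for 60ms plus the rack_min_probertt_hold
 * floor past rc_time_probertt_starts before exiting.
 */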
3285 if (TSTMP_GEQ(us_cts, endtime)) {
3286 /* yes, exit probertt */
3287 rack_exit_probertt(rack, us_cts);
3290 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) {
3291 /* Go into probertt, it's been too long since we went lower */
3292 rack_enter_probertt(rack, us_cts);
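/*
 * Compare the latest goodput estimate against the previous one,
 * bracketed by rack_gp_per_bw_mul_up/_down. Above the upper bound we
 * keep raising (ignoring timely); below the lower bound with no loss
 * we push back up a bounded number of times before surrendering to
 * the RTT signal; inside the band we let timely decide.
 */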
3297 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est,
3298 uint32_t rtt, int32_t rtt_diff)
3300 uint64_t cur_bw, up_bnd, low_bnd, subfr;
3303 if ((rack->rc_gp_dyn_mul == 0) ||
3304 (rack->use_fixed_rate) ||
3305 (rack->in_probe_rtt) ||
3306 (rack->rc_always_pace == 0)) {
3307 /* No dynamic GP multiplier in play */
3310 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start;
3311 cur_bw = rack_get_bw(rack);
3312 /* Calculate our up and down range */
3313 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up;
3315 up_bnd += rack->r_ctl.last_gp_comp_bw;
3317 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down;
3319 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr;
3320 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) {
3322 * This is the case where our RTT is above
3323 * the max target and we have been configured
3324 * to just run timely with no bonus increases in that case.
3326 * There are two configurations: set to 1, and we
3327 * just do timely if we are over our max. If it's
3328 * set above 1 then we slam the multipliers down
3329 * to 100 and then decrement per timely.
3331 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
3333 if (rack->r_ctl.rc_no_push_at_mrtt > 1)
3334 rack_validate_multipliers_at_or_below_100(rack);
3335 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
3336 } else if ((last_bw_est < low_bnd) && !losses) {
3338 * We are decreasing; this is a bit complicated. It
3339 * means we are losing ground. This could be
3340 * because another flow entered and we are competing
3341 * for b/w with it. This will push the RTT up, which
3342 * makes timely unusable unless we want to get shoved
3343 * into a corner and just be backed off (the age-
3344 * old problem with delay-based CC).
3346 * On the other hand if it was a route change we
3347 * would like to stay somewhat contained and not
3348 * blow out the buffers.
3350 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
3352 rack->r_ctl.last_gp_comp_bw = cur_bw;
3353 if (rack->rc_gp_bwred == 0) {
3354 /* Go into reduction counting */
3355 rack->rc_gp_bwred = 1;
3356 rack->rc_gp_timely_dec_cnt = 0;
3358 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) ||
3359 (timely_says == 0)) {
3361 * Push another time with a faster pacing
3362 * to try to gain back (we include override to
3363 * get a full raise factor).
3365 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) ||
3366 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) ||
3367 (timely_says == 0) ||
3368 (rack_down_raise_thresh == 0)) {
3370 * Do an override up in b/w if we were
3371 * below the threshold or if the threshold
3372 * is zero we always do the raise.
3374 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1);
3376 /* Log it stays the same */
3377 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0,
3381 rack->rc_gp_timely_dec_cnt++;
3382 /* We are not really incrementing, so don't count it */
3383 rack->rc_gp_incr = 0;
3384 rack->rc_gp_timely_inc_cnt = 0;
3387 * Let's just use the RTT
3388 * information and give up
3393 } else if ((timely_says != 2) &&
3395 (last_bw_est > up_bnd)) {
3397 * We are increasing b/w lets keep going, updating
3398 * our b/w and ignoring any timely input, unless
3399 * of course we are at our max raise (if there is one).
3402 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
3404 rack->r_ctl.last_gp_comp_bw = cur_bw;
3405 if (rack->rc_gp_saw_ss &&
3406 rack_per_upper_bound_ss &&
3407 (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) {
3409 * In cases where we can't go higher
3410 * we should just use timely.
3414 if (rack->rc_gp_saw_ca &&
3415 rack_per_upper_bound_ca &&
3416 (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) {
3418 * In cases where we can't go higher
3419 * we should just use timely.
3423 rack->rc_gp_bwred = 0;
3424 rack->rc_gp_timely_dec_cnt = 0;
3425 /* You get a set number of pushes if timely is trying to reduce */
3426 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) {
3427 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
3429 /* Log it stays the same */
3430 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0,
3437 * We are staying between the lower and upper range bounds
3438 * so use timely to decide.
3440 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
3444 rack->rc_gp_incr = 0;
3445 rack->rc_gp_timely_inc_cnt = 0;
3446 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) &&
3448 (last_bw_est < low_bnd)) {
3449 /* We are losing ground */
3450 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
3451 rack->rc_gp_timely_dec_cnt++;
3452 /* We are not really incrementing, so don't count it */
3453 rack->rc_gp_incr = 0;
3454 rack->rc_gp_timely_inc_cnt = 0;
3456 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
3458 rack->rc_gp_bwred = 0;
3459 rack->rc_gp_timely_dec_cnt = 0;
3460 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
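/*
 * Classify the measured gp srtt against the filtered min-rtt: a
 * value at or above min_rtt * rack_gp_rtt_maxmul demands a decrease,
 * a value near the floor (min_rtt plus the minmul/mindiv fraction)
 * allows an increase, and in between the sign of the smoothed
 * rtt_diff gradient decides.
 */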
3466 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt)
3468 int32_t timely_says;
3469 uint64_t log_mult, log_rtt_a_diff;
3471 log_rtt_a_diff = rtt;
3472 log_rtt_a_diff <<= 32;
3473 log_rtt_a_diff |= (uint32_t)rtt_diff;
3474 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) *
3475 rack_gp_rtt_maxmul)) {
3476 /* Reduce the b/w multiplier */
3478 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
3480 log_mult |= prev_rtt;
3481 rack_log_timely(rack, timely_says, log_mult,
3482 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3483 log_rtt_a_diff, __LINE__, 4);
3484 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
3485 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
3486 max(rack_gp_rtt_mindiv, 1)))) {
3487 /* Increase the b/w multiplier */
3488 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
3489 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
3490 max(rack_gp_rtt_mindiv, 1));
3492 log_mult |= prev_rtt;
3494 rack_log_timely(rack, timely_says, log_mult,
3495 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3496 log_rtt_a_diff, __LINE__, 5);
3499 * Use a gradient to find the timely gradient:
3501 * grad = rc_rtt_diff / min_rtt;
3503 * anything below or equal to 0 will be
3504 * an increase indication. Anything above
3505 * zero is a decrease. Note we take care
3506 * of the actual gradient calculation
3507 * in the reduction (it's not needed for
3510 log_mult = prev_rtt;
3511 if (rtt_diff <= 0) {
3513 * Rttdiff is less than or equal to zero; increase the
3514 * b/w multiplier (it's 0 or negative)
3517 rack_log_timely(rack, timely_says, log_mult,
3518 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6);
3520 /* Reduce the b/w multiplier */
3522 rack_log_timely(rack, timely_says, log_mult,
3523 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7);
3526 return (timely_says);
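/*
 * Complete a goodput measurement: derive bytes/sec over the larger
 * of the send and ack intervals, discard it if the window was too
 * small or app-limited, cap it at the rwnd/min-rtt derived maximum,
 * fold it into gp_bw (seed, then straight average, then WMA), run
 * the timely multiplier update, and if app-limited data remains,
 * arm the next measurement window.
 */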
3530 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
3531 tcp_seq th_ack, int line)
3533 uint64_t tim, bytes_ps, ltim, stim, utim;
3534 uint32_t segsiz, bytes, reqbytes, us_cts;
3535 int32_t gput, new_rtt_diff, timely_says;
3537 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
3538 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
3539 if (TSTMP_GEQ(us_cts, tp->gput_ts))
3540 tim = us_cts - tp->gput_ts;
3544 if (TSTMP_GT(rack->r_ctl.rc_gp_cumack_ts, rack->r_ctl.rc_gp_output_ts))
3545 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts;
3549 * Use the larger of the send time or ack time. This prevents us
3550 * from being influenced by ack artifacts and coming up with too
3551 * high a measurement. Note that since we are spanning many more
3552 * bytes in most of our measurements, hopefully that is less likely to
3558 utim = max(stim, 1);
3559 /* Lets validate utim */
3560 ltim = max(1, (utim/HPTS_USEC_IN_MSEC));
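/*
 * gput below is bytes << 3 over elapsed ms, i.e. kbit/s. As an
 * illustration (numbers made up): 150,000 bytes acked over 10ms
 * yields 120,000 kbit/s (120 Mbit/s).
 */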
3561 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim;
3562 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz));
3563 if ((tim == 0) && (stim == 0)) {
3565 * Invalid measurement time, maybe
3566 * all on one ack/one send?
3570 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
3571 0, 0, 0, 10, __LINE__, NULL);
3572 goto skip_measurement;
3574 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) {
3575 /* We never made a us_rtt measurement? */
3578 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
3579 0, 0, 0, 10, __LINE__, NULL);
3580 goto skip_measurement;
3583 * Calculate the maximum possible b/w this connection
3584 * could have. We base our calculation on the lowest
3585 * rtt we have seen during the measurement and the
3586 * largest rwnd the client has given us in that time. This
3587 * forms a BDP that is the maximum that we could ever
3588 * get to the client. Anything larger is not valid.
3590 * I originally had code here that rejected measurements
3591 * where the time was less than 1/2 the latest us_rtt.
3592 * But after thinking on that I realized it's wrong since,
3593 * say, you had a 150Mbps or even 1Gbps link and you
3594 * were a long way away... example: I am in Europe (100ms rtt)
3595 * talking to my 1Gbps link in S.C. Now measuring, say, 150,000
3596 * bytes my time would be 1.2ms, and yet my rtt would say
3597 * the measurement was invalid since the time was < 50ms. The
3598 * same thing is true for 150Mb (8ms of time).
3600 * A better way I realized is to look at what the maximum
3601 * the connection could possibly do. This is gated on
3602 * the lowest RTT we have seen and the highest rwnd.
3603 * We should in theory never exceed that, if we are
3604 * then something on the path is storing up packets
3605 * and then feeding them all at once to our endpoint
3606 * messing up our measurement.
3608 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd;
3609 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC;
3610 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt;
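/*
 * Example of the cap (illustrative numbers): a 1,000,000 byte max
 * rwnd seen with a 10,000us low rtt gives 1e6 * 1e6 / 1e4 =
 * 100,000,000 bytes/sec (800 Mbit/s) as the highest believable b/w.
 */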
3611 if (SEQ_LT(th_ack, tp->gput_seq)) {
3612 /* No measurement can be made */
3615 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
3616 0, 0, 0, 10, __LINE__, NULL);
3617 goto skip_measurement;
3619 bytes = (th_ack - tp->gput_seq);
3620 bytes_ps = (uint64_t)bytes;
3622 * Don't measure a b/w for pacing unless we have gotten at least
3623 * an initial window's worth of data in this measurement interval.
3625 * Small numbers of bytes get badly influenced by delayed ack and
3626 * other artifacts. Note we take the initial window or our
3627 * defined minimum GP (defaulting to 10 which hopefully is the
3630 if (rack->rc_gp_filled == 0) {
3632 * The initial estimate is special. We
3633 * have blasted out an IW worth of packets
3634 * without real valid ack ts results. We
3635 * then setup the app_limited_needs_set flag,
3636 * this should get the first ack in (probably 2
3637 * MSS worth) to be recorded as the timestamp.
3638 * We thus allow a smaller number of bytes i.e.
3641 reqbytes -= (2 * segsiz);
3642 /* Also lets fill previous for our first measurement to be neutral */
3643 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
3645 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) {
3646 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
3647 rack->r_ctl.rc_app_limited_cnt,
3648 0, 0, 10, __LINE__, NULL);
3649 goto skip_measurement;
3652 * We now need to calculate the Timely like status so
3653 * we can update (possibly) the b/w multipliers.
3655 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt;
3656 if (rack->rc_gp_filled == 0) {
3657 /* No previous reading */
3658 rack->r_ctl.rc_rtt_diff = new_rtt_diff;
3660 if (rack->measure_saw_probe_rtt == 0) {
3662 * We don't want a probertt to be counted
3663 * since it will be negative incorrectly. We
3664 * expect to be reducing the RTT when we
3665 * pace at a slower rate.
3667 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8);
3668 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8);
3671 timely_says = rack_make_timely_judgement(rack,
3672 rack->r_ctl.rc_gp_srtt,
3673 rack->r_ctl.rc_rtt_diff,
3674 rack->r_ctl.rc_prev_gp_srtt
3676 bytes_ps *= HPTS_USEC_IN_SEC;
3678 if (bytes_ps > rack->r_ctl.last_max_bw) {
3680 * Something on the path is playing games,
3681 * since this b/w is not possible based
3682 * on our BDP (highest rwnd and lowest rtt
3683 * we saw in the measurement window).
3685 * Another option here would be to
3686 * instead skip the measurement.
3688 rack_log_pacing_delay_calc(rack, bytes, reqbytes,
3689 bytes_ps, rack->r_ctl.last_max_bw, 0,
3690 11, __LINE__, NULL);
3691 bytes_ps = rack->r_ctl.last_max_bw;
3693 /* We store gp for b/w in bytes per second */
3694 if (rack->rc_gp_filled == 0) {
3695 /* Initial measurement */
3697 rack->r_ctl.gp_bw = bytes_ps;
3698 rack->rc_gp_filled = 1;
3699 rack->r_ctl.num_avg = 1;
3700 rack_set_pace_segments(rack->rc_tp, rack, __LINE__);
3702 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
3703 rack->r_ctl.rc_app_limited_cnt,
3704 0, 0, 10, __LINE__, NULL);
3706 if (rack->rc_inp->inp_in_hpts &&
3707 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
3709 * Ok we can't trust the pacer in this case
3710 * where we transition from un-paced to paced.
3711 * Or for that matter when the burst mitigation
3712 * was making a wild guess and got it wrong.
3713 * Stop the pacer and clear up all the aggregate
3716 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
3717 rack->r_ctl.rc_hpts_flags = 0;
3718 rack->r_ctl.rc_last_output_to = 0;
3720 } else if (rack->r_ctl.num_avg < RACK_REQ_AVG) {
3721 /* Still a small number run an average */
3722 rack->r_ctl.gp_bw += bytes_ps;
3723 rack->r_ctl.num_avg++;
3724 if (rack->r_ctl.num_avg >= RACK_REQ_AVG) {
3725 /* We have collected enough to move forward */
3726 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_avg;
3730 * We want to take 1/wma of the goodput and add it to 7/8ths
3731 * of the old value, weighted by the srtt. So if your measurement
3732 * period is say 2 SRTT's long you would get 1/4 as the
3733 * value, if it was like 1/2 SRTT then you would get 1/16th.
3735 * But we must be careful not to take too much i.e. if the
3736 * srtt is say 20ms and the measurement is taken over
3737 * 400ms our weight would be 400/20 i.e. 20. On the
3738 * other hand if we get a measurement over 1ms with a
3739 * 10ms rtt we only want to take a much smaller portion.
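/*
 * Worked example of the non-dynamic weighting below (illustrative
 * numbers): with srtt = 20ms and a 5ms measurement, utim/(srtt*8)
 * is 5/160, so we retire ~3.1% of the old gp_bw and add the same
 * fraction of the new sample. A sample spanning 8 srtt's would
 * carry full 1/1 weight, which is why it is clamped to 1/2.
 */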
3741 uint64_t resid_bw, subpart, addpart, srtt;
3743 srtt = ((uint64_t)TICKS_2_USEC(tp->t_srtt) >> TCP_RTT_SHIFT);
3746 * Strange, why did t_srtt go back to zero?
3748 if (rack->r_ctl.rc_rack_min_rtt)
3749 srtt = (rack->r_ctl.rc_rack_min_rtt * HPTS_USEC_IN_MSEC);
3751 srtt = HPTS_USEC_IN_MSEC;
3754 * XXXrrs: Note for reviewers, in playing with
3755 * dynamic pacing I discovered this GP calculation
3756 * as done originally leads to some undesired results.
3757 * Basically you can get longer measurements contributing
3758 * too much to the WMA. Thus I changed it, if you are doing
3759 * dynamic adjustments, to only do the apportioned adjustment
3760 * if we have a very small (time-wise) measurement. Longer
3761 * measurements just get their weight (defaulting to 1/8)
3762 * added to the WMA. We may want to think about changing
3763 * this to always do that for both sides i.e. dynamic
3764 * and non-dynamic... but considering lots of folks
3765 * were playing with this I did not want to change the
3766 * calculation per se without your thoughts... Lawrence?
3769 if (rack->rc_gp_dyn_mul == 0) {
3770 subpart = rack->r_ctl.gp_bw * utim;
3771 subpart /= (srtt * 8);
3772 if (subpart < (rack->r_ctl.gp_bw / 2)) {
3774 * The b/w update takes no more
3775 * away than 1/2 our running total
3778 addpart = bytes_ps * utim;
3779 addpart /= (srtt * 8);
3782 * Don't allow a single measurement
3783 * to account for more than 1/2 of the
3784 * WMA. This could happen on a retransmission
3785 * where utim becomes huge compared to
3786 * srtt (multiple retransmissions when using
3787 * the sending rate which factors in all the
3788 * transmissions from the first one).
3790 subpart = rack->r_ctl.gp_bw / 2;
3791 addpart = bytes_ps / 2;
3793 resid_bw = rack->r_ctl.gp_bw - subpart;
3794 rack->r_ctl.gp_bw = resid_bw + addpart;
3796 if ((utim / srtt) <= 1) {
3798 * The b/w update was over a small period
3799 * of time. The idea here is to prevent a small
3800 * measurement time period from counting
3801 * too much. So we scale it based on the
3802 * time so it attributes less than 1/rack_wma_divisor
3803 * of its measurement.
3805 subpart = rack->r_ctl.gp_bw * utim;
3806 subpart /= (srtt * rack_wma_divisor);
3807 addpart = bytes_ps * utim;
3808 addpart /= (srtt * rack_wma_divisor);
3811 * The scaled measurement was long
3812 * enough, so let's just add in the
3813 * portion of the measurement, i.e. 1/rack_wma_divisor
3815 subpart = rack->r_ctl.gp_bw / rack_wma_divisor;
3816 addpart = bytes_ps / rack_wma_divisor;
3818 if ((rack->measure_saw_probe_rtt == 0) ||
3819 (bytes_ps > rack->r_ctl.gp_bw)) {
3821 * For probe-rtt we only add it in
3822 * if it's larger; all others we just
3825 resid_bw = rack->r_ctl.gp_bw - subpart;
3826 rack->r_ctl.gp_bw = resid_bw + addpart;
3830 /* We do not update any multipliers if we are in or have seen a probe-rtt */
3831 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set)
3832 rack_update_multiplier(rack, timely_says, bytes_ps,
3833 rack->r_ctl.rc_gp_srtt,
3834 rack->r_ctl.rc_rtt_diff);
3835 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim,
3836 rack_get_bw(rack), 3, line, NULL);
3837 /* reset the gp srtt and setup the new prev */
3838 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
3839 /* Record the lost count for the next measurement */
3840 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count;
3842 * We restart our diffs based on the gpsrtt in the
3843 * measurement window.
3845 rack->rc_gp_rtt_set = 0;
3846 rack->rc_gp_saw_rec = 0;
3847 rack->rc_gp_saw_ca = 0;
3848 rack->rc_gp_saw_ss = 0;
3849 rack->rc_dragged_bottom = 0;
3853 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
3856 * XXXLAS: This is a temporary hack, and should be
3857 * chained off VOI_TCP_GPUT when stats(9) grows an
3858 * API to deal with chained VOIs.
3860 if (tp->t_stats_gput_prev > 0)
3861 stats_voi_update_abs_s32(tp->t_stats,
3863 ((gput - tp->t_stats_gput_prev) * 100) /
3864 tp->t_stats_gput_prev);
3866 tp->t_flags &= ~TF_GPUTINPROG;
3867 tp->t_stats_gput_prev = gput;
3869 * Now, are we app limited and is there space from where we
3870 * were to where we want to go?
3872 * We don't do the other case i.e. non-applimited here since
3873 * the next send will trigger us picking up the missing data.
3875 if (rack->r_ctl.rc_first_appl &&
3876 TCPS_HAVEESTABLISHED(tp->t_state) &&
3877 rack->r_ctl.rc_app_limited_cnt &&
3878 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) &&
3879 ((rack->r_ctl.rc_first_appl->r_start - th_ack) >
3880 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
3882 * Yep there is enough outstanding to make a measurement here.
3884 struct rack_sendmap *rsm, fe;
3886 tp->t_flags |= TF_GPUTINPROG;
3887 rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
3888 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
3889 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
3890 rack->app_limited_needs_set = 0;
3891 tp->gput_seq = th_ack;
3892 if (rack->in_probe_rtt)
3893 rack->measure_saw_probe_rtt = 1;
3894 else if ((rack->measure_saw_probe_rtt) &&
3895 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
3896 rack->measure_saw_probe_rtt = 0;
3897 if ((rack->r_ctl.rc_first_appl->r_start - th_ack) >= rack_get_measure_window(tp, rack)) {
3898 /* There is a full window to gain info from */
3899 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
3901 /* We can only measure up to the applimited point */
3902 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_start - th_ack);
3905 * Now we need to find the timestamp of the send at tp->gput_seq
3906 * for the send based measurement.
3908 fe.r_start = tp->gput_seq;
3909 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
3911 /* Ok send-based limit is set */
3912 if (SEQ_LT(rsm->r_start, tp->gput_seq)) {
3914 * Move back to include the earlier part
3915 * so our ack time lines up right (this may
3916 * make an overlapping measurement but that's
3919 tp->gput_seq = rsm->r_start;
3921 if (rsm->r_flags & RACK_ACKED)
3922 tp->gput_ts = rsm->r_ack_arrival;
3924 rack->app_limited_needs_set = 1;
3925 rack->r_ctl.rc_gp_output_ts = rsm->usec_orig_send;
3928 * If we don't find the rsm due to some
3929 * send-limit, set the current time, which
3930 * basically disables the send-limit.
3932 rack->r_ctl.rc_gp_output_ts = tcp_get_usecs(NULL);
3934 rack_log_pacing_delay_calc(rack,
3939 rack->r_ctl.rc_app_limited_cnt,
3946 * CC wrapper hook functions
3949 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, struct tcphdr *th, uint16_t nsegs,
3950 uint16_t type, int32_t recovery)
3952 INP_WLOCK_ASSERT(tp->t_inpcb);
3953 tp->ccv->nsegs = nsegs;
3954 tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
3955 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) {
3958 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp);
3959 if (tp->ccv->bytes_this_ack > max) {
3960 tp->ccv->bytes_this_ack = max;
3963 if (rack->r_ctl.cwnd_to_use <= tp->snd_wnd)
3964 tp->ccv->flags |= CCF_CWND_LIMITED;
3966 tp->ccv->flags &= ~CCF_CWND_LIMITED;
3968 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
3969 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd);
3971 if ((tp->t_flags & TF_GPUTINPROG) &&
3972 rack_enough_for_measurement(tp, rack, th->th_ack)) {
3973 /* Measure the Goodput */
3974 rack_do_goodput_measurement(tp, rack, th->th_ack, __LINE__);
3975 #ifdef NETFLIX_PEAKRATE
3976 if ((type == CC_ACK) &&
3977 (tp->t_maxpeakrate)) {
3979 * We update t_peakrate_thr. This gives us roughly
3980 * one update per round trip time. Note
3981 * it will only be used if pace_always is off, i.e.
3982 * we don't do this for paced flows.
3984 tcp_update_peakrate_thr(tp);
3988 if (rack->r_ctl.cwnd_to_use > tp->snd_ssthresh) {
3989 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
3990 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp));
3991 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) {
3992 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use;
3993 tp->ccv->flags |= CCF_ABC_SENTAWND;
3996 tp->ccv->flags &= ~CCF_ABC_SENTAWND;
3997 tp->t_bytes_acked = 0;
3999 if (CC_ALGO(tp)->ack_received != NULL) {
4000 /* XXXLAS: Find a way to live without this */
4001 tp->ccv->curack = th->th_ack;
4002 CC_ALGO(tp)->ack_received(tp->ccv, type);
4005 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use);
4007 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) {
4008 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use;
4010 #ifdef NETFLIX_PEAKRATE
4011 /* we enforce max peak rate if it is set and we are not pacing */
4012 if ((rack->rc_always_pace == 0) &&
4013 tp->t_peakrate_thr &&
4014 (tp->snd_cwnd > tp->t_peakrate_thr)) {
4015 tp->snd_cwnd = tp->t_peakrate_thr;
4021 tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th)
4023 struct tcp_rack *rack;
4025 rack = (struct tcp_rack *)tp->t_fb_ptr;
4026 INP_WLOCK_ASSERT(tp->t_inpcb);
4028 * If we are doing PRR and have enough
4029 * room to send <or> we are pacing and prr
4030 * is disabled we will want to see if we
4031 * can send data (by setting r_wanted_output to
4034 if ((rack->r_ctl.rc_prr_sndcnt > 0) ||
4036 rack->r_wanted_output = 1;
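/*
 * Post-recovery hook: if we told the CC module when we entered
 * (i.e. we were actually backing off), let it run post_recovery and
 * drop cwnd to ssthresh; any unspent PRR send count is folded back
 * into cwnd before we exit recovery.
 */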
4040 rack_post_recovery(struct tcpcb *tp, struct tcphdr *th)
4042 struct tcp_rack *rack;
4046 orig_cwnd = tp->snd_cwnd;
4047 INP_WLOCK_ASSERT(tp->t_inpcb);
4048 rack = (struct tcp_rack *)tp->t_fb_ptr;
4049 if (rack->rc_not_backing_off == 0) {
4050 /* only alert CC if we alerted when we entered */
4051 if (CC_ALGO(tp)->post_recovery != NULL) {
4052 tp->ccv->curack = th->th_ack;
4053 CC_ALGO(tp)->post_recovery(tp->ccv);
4055 if (tp->snd_cwnd > tp->snd_ssthresh) {
4056 /* Drop us down to the ssthresh (1/2 cwnd at loss) */
4057 tp->snd_cwnd = tp->snd_ssthresh;
4060 if ((rack->rack_no_prr == 0) &&
4061 (rack->r_ctl.rc_prr_sndcnt > 0)) {
4062 /* Suck the next prr cnt back into cwnd */
4063 tp->snd_cwnd += rack->r_ctl.rc_prr_sndcnt;
4064 rack->r_ctl.rc_prr_sndcnt = 0;
4065 rack_log_to_prr(rack, 1, 0);
4067 rack_log_to_prr(rack, 14, orig_cwnd);
4068 tp->snd_recover = tp->snd_una;
4069 EXIT_RECOVERY(tp->t_flags);
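/*
 * Congestion signal dispatch. On dup-ack entry we prime PRR state
 * and snd_recover; the ECN and RTO paths reset cwnd/ssthresh; a
 * detected spurious RTO restores the pre-RTO state. If the
 * connection is highly buffered and the gp srtt is still close to
 * the path minimum, we may enter recovery without backing off
 * (rc_not_backing_off), in which case the CC module is not alerted.
 */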
4073 rack_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
4075 struct tcp_rack *rack;
4077 INP_WLOCK_ASSERT(tp->t_inpcb);
4079 rack = (struct tcp_rack *)tp->t_fb_ptr;
4082 tp->t_flags &= ~TF_WASFRECOVERY;
4083 tp->t_flags &= ~TF_WASCRECOVERY;
4084 if (!IN_FASTRECOVERY(tp->t_flags)) {
4085 rack->r_ctl.rc_prr_delivered = 0;
4086 rack->r_ctl.rc_prr_out = 0;
4087 if (rack->rack_no_prr == 0) {
4088 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
4089 rack_log_to_prr(rack, 2, 0);
4091 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una;
4092 tp->snd_recover = tp->snd_max;
4093 if (tp->t_flags2 & TF2_ECN_PERMIT)
4094 tp->t_flags2 |= TF2_ECN_SND_CWR;
4098 if (!IN_CONGRECOVERY(tp->t_flags) ||
4100 * Allow ECN reaction on ACK to CWR, if
4101 * that data segment was also CE marked.
4103 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
4104 EXIT_CONGRECOVERY(tp->t_flags);
4105 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd);
4106 tp->snd_recover = tp->snd_max + 1;
4107 if (tp->t_flags2 & TF2_ECN_PERMIT)
4108 tp->t_flags2 |= TF2_ECN_SND_CWR;
4113 tp->t_bytes_acked = 0;
4114 EXIT_RECOVERY(tp->t_flags);
4115 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 /
4116 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp);
4117 tp->snd_cwnd = ctf_fixed_maxseg(tp);
4118 if (tp->t_flags2 & TF2_ECN_PERMIT)
4119 tp->t_flags2 |= TF2_ECN_SND_CWR;
4122 KMOD_TCPSTAT_INC(tcps_sndrexmitbad);
4123 /* RTO was unnecessary, so reset everything. */
4124 tp->snd_cwnd = tp->snd_cwnd_prev;
4125 tp->snd_ssthresh = tp->snd_ssthresh_prev;
4126 tp->snd_recover = tp->snd_recover_prev;
4127 if (tp->t_flags & TF_WASFRECOVERY) {
4128 ENTER_FASTRECOVERY(tp->t_flags);
4129 tp->t_flags &= ~TF_WASFRECOVERY;
4131 if (tp->t_flags & TF_WASCRECOVERY) {
4132 ENTER_CONGRECOVERY(tp->t_flags);
4133 tp->t_flags &= ~TF_WASCRECOVERY;
4135 tp->snd_nxt = tp->snd_max;
4136 tp->t_badrxtwin = 0;
4140 * If we are below our max rtt, don't
4141 * signal the CC control to change things.
4142 * instead set it up so that we are in
4143 * recovery but not going to back off.
4146 if (rack->rc_highly_buffered) {
4148 * Do we use the higher rtt for
4149 * our threshold to not backoff (like CDG)?
4151 uint32_t rtt_mul, rtt_div;
4153 if (rack_use_max_for_nobackoff) {
4154 rtt_mul = (rack_gp_rtt_maxmul - 1);
4157 rtt_mul = rack_gp_rtt_minmul;
4158 rtt_div = max(rack_gp_rtt_mindiv , 1);
4160 if (rack->r_ctl.rc_gp_srtt <= (rack->r_ctl.rc_lowest_us_rtt +
4161 ((rack->r_ctl.rc_lowest_us_rtt * rtt_mul) /
4163 /* below our min threshold */
4164 rack->rc_not_backing_off = 1;
4165 ENTER_RECOVERY(rack->rc_tp->t_flags);
4166 rack_log_rtt_shrinks(rack, 0,
4169 RACK_RTTS_NOBACKOFF);
4173 rack->rc_not_backing_off = 0;
4174 if (CC_ALGO(tp)->cong_signal != NULL) {
4176 tp->ccv->curack = th->th_ack;
4177 CC_ALGO(tp)->cong_signal(tp->ccv, type);
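/*
 * Restart-after-idle: give the CC module its after_idle hook, then
 * make sure cwnd is at least the initial window (or one segment's
 * worth if a lost SYN(-ACK) left cwnd at 1).
 */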
4184 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp)
4188 INP_WLOCK_ASSERT(tp->t_inpcb);
4190 #ifdef NETFLIX_STATS
4191 KMOD_TCPSTAT_INC(tcps_idle_restarts);
4192 if (tp->t_state == TCPS_ESTABLISHED)
4193 KMOD_TCPSTAT_INC(tcps_idle_estrestarts);
4195 if (CC_ALGO(tp)->after_idle != NULL)
4196 CC_ALGO(tp)->after_idle(tp->ccv);
4198 if (tp->snd_cwnd == 1)
4199 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */
4201 i_cwnd = rc_init_window(rack);
4204 * Being idle is no different than the initial window. If the cc
4205 * clamps it down below the initial window, raise it to the initial
4208 if (tp->snd_cwnd < i_cwnd) {
4209 tp->snd_cwnd = i_cwnd;
4215 * Indicate whether this ack should be delayed. We can delay the ack if
4216 * following conditions are met:
4217 * - There is no delayed ack timer in progress.
4218 * - Our last ack wasn't a 0-sized window. We never want to delay
4219 * the ack that opens up a 0-sized window.
4220 * - LRO wasn't used for this segment. We make sure by checking that the
4221 * segment size is not larger than the MSS.
4222 * - Delayed acks are enabled or this is a half-synchronized T/TCP
4225 #define DELAY_ACK(tp, tlen) \
4226 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \
4227 ((tp->t_flags & TF_DELACK) == 0) && \
4228 (tlen <= tp->t_maxseg) && \
4229 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
4231 static struct rack_sendmap *
4232 rack_find_lowest_rsm(struct tcp_rack *rack)
4234 struct rack_sendmap *rsm;
4237 * Walk the time-order transmitted list looking for an rsm that is
4238 * not acked. This will be the one that was sent the longest time
4239 * ago that is still outstanding.
4241 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
4242 if (rsm->r_flags & RACK_ACKED) {
4251 static struct rack_sendmap *
4252 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
4254 struct rack_sendmap *prsm;
4257 * Walk the sequence order list backward until we arrive at
4258 * the highest seq not acked. In theory when this is called it
4259 * should be the last segment (which it was not).
4261 counter_u64_add(rack_find_high, 1);
4263 RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) {
4264 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
4274 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
4280 * lro is the flag we use to determine if we have seen reordering.
4281 * If it gets set we have seen reordering. The reorder logic either
4282 * works in one of two ways:
4284 * If reorder-fade is configured, then we track the last time we saw
4285 * re-ordering occur. If we reach the point where enough time has
4286 * passed we no longer consider reordering to be occurring.
4288 * Or if reorder-fade is 0, then once we see reordering we consider
4289 * the connection to always be subject to reordering and just set lro
4292 * In the end if lro is non-zero we add the extra time for
4297 if (rack->r_ctl.rc_reorder_ts) {
4298 if (rack->r_ctl.rc_reorder_fade) {
4299 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
4300 lro = cts - rack->r_ctl.rc_reorder_ts;
4303 * No time has passed since the last
4304 * reorder, mark it as reordering.
4309 /* Negative time? */
4312 if (lro > rack->r_ctl.rc_reorder_fade) {
4313 /* Turn off reordering seen too */
4314 rack->r_ctl.rc_reorder_ts = 0;
4318 /* Reordering does not fade */
4324 thresh = srtt + rack->r_ctl.rc_pkt_delay;
4326 /* It must be set, if not you get 1/4 rtt */
4327 if (rack->r_ctl.rc_reorder_shift)
4328 thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
4330 thresh += (srtt >> 2);
4334 /* We don't let the rack timeout be above a RTO */
4335 if (thresh > TICKS_2_MSEC(rack->rc_tp->t_rxtcur)) {
4336 thresh = TICKS_2_MSEC(rack->rc_tp->t_rxtcur);
4338 /* And we don't want it above the RTO max either */
4339 if (thresh > rack_rto_max) {
4340 thresh = rack_rto_max;
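/*
 * Compute the TLP expiry threshold: srtt plus srtt/rc_tlp_threshold
 * (or 2*srtt when unset), widened per the configured method to
 * cover delayed-ack or inter-packet gaps for tail segments, then
 * clamped below the RTO and rack_rto_max and floored at
 * rack_tlp_min.
 */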
4346 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
4347 struct rack_sendmap *rsm, uint32_t srtt)
4349 struct rack_sendmap *prsm;
4350 uint32_t thresh, len;
4355 if (rack->r_ctl.rc_tlp_threshold)
4356 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold);
4358 thresh = (srtt * 2);
4360 /* Get the previous sent packet, if any */
4361 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
4362 counter_u64_add(rack_enter_tlp_calc, 1);
4363 len = rsm->r_end - rsm->r_start;
4364 if (rack->rack_tlp_threshold_use == TLP_USE_ID) {
4365 /* Exactly like the ID */
4366 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) {
4367 uint32_t alt_thresh;
4369 * Compensate for delayed-ack with the d-ack time.
4371 counter_u64_add(rack_used_tlpmethod, 1);
4372 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
4373 if (alt_thresh > thresh)
4374 thresh = alt_thresh;
4376 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) {
4378 prsm = TAILQ_PREV(rsm, rack_head, r_tnext);
4379 if (prsm && (len <= segsiz)) {
4381 * Two packets outstanding, thresh should be (2*srtt) +
4382 * possible inter-packet delay (if any).
4384 uint32_t inter_gap = 0;
4387 counter_u64_add(rack_used_tlpmethod, 1);
4388 idx = rsm->r_rtr_cnt - 1;
4389 nidx = prsm->r_rtr_cnt - 1;
4390 if (TSTMP_GEQ(rsm->r_tim_lastsent[nidx], prsm->r_tim_lastsent[idx])) {
4391 /* Yes it was sent later (or at the same time) */
4392 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx];
4394 thresh += inter_gap;
4395 } else if (len <= segsiz) {
4397 * Possibly compensate for delayed-ack.
4399 uint32_t alt_thresh;
4401 counter_u64_add(rack_used_tlpmethod2, 1);
4402 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
4403 if (alt_thresh > thresh)
4404 thresh = alt_thresh;
4406 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) {
4408 if (len <= segsiz) {
4409 uint32_t alt_thresh;
4411 * Compensate for delayed-ack with the d-ack time.
4413 counter_u64_add(rack_used_tlpmethod, 1);
4414 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
4415 if (alt_thresh > thresh)
4416 thresh = alt_thresh;
4419 /* Not above an RTO */
4420 if (thresh > TICKS_2_MSEC(tp->t_rxtcur)) {
4421 thresh = TICKS_2_MSEC(tp->t_rxtcur);
4423 /* Not above a RTO max */
4424 if (thresh > rack_rto_max) {
4425 thresh = rack_rto_max;
4427 /* Apply user supplied min TLP */
4428 if (thresh < rack_tlp_min) {
4429 thresh = rack_tlp_min;
4435 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack)
4438 * We want the rack_rtt which is the
4439 * last rtt we measured. However if that
4440 * does not exist we fall back to the srtt (which
4441 * we probably will never do) and then as a last
4442 * resort we use RACK_INITIAL_RTO if no srtt is
4445 if (rack->rc_rack_rtt)
4446 return(rack->rc_rack_rtt);
4447 else if (tp->t_srtt == 0)
4448 return(RACK_INITIAL_RTO);
4449 return (TICKS_2_MSEC(tp->t_srtt >> TCP_RTT_SHIFT));
4452 static struct rack_sendmap *
4453 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused)
4456 * Check to see that we don't need to fall into recovery. We will
4457 * need to do so if our oldest transmit is past the time we should
4460 struct tcp_rack *rack;
4461 struct rack_sendmap *rsm;
4463 uint32_t srtt, thresh;
4465 rack = (struct tcp_rack *)tp->t_fb_ptr;
4466 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) {
4469 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
4473 if (rsm->r_flags & RACK_ACKED) {
4474 rsm = rack_find_lowest_rsm(rack);
4478 idx = rsm->r_rtr_cnt - 1;
4479 srtt = rack_grab_rtt(tp, rack);
4480 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
4481 if (TSTMP_LT(tsused, rsm->r_tim_lastsent[idx])) {
4484 if ((tsused - rsm->r_tim_lastsent[idx]) < thresh) {
4487 /* Ok, if we reach here we are overdue and this guy can be sent */
4488 if (IN_RECOVERY(tp->t_flags) == 0) {
4490 * For the one that enters us into recovery record undo
4493 rack->r_ctl.rc_rsm_start = rsm->r_start;
4494 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
4495 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
4497 rack_cong_signal(tp, NULL, CC_NDUPACK);
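/*
 * Persist timer value: the classic srtt + 4*rttvar estimate scaled
 * by the tcp_backoff[] shift for t_rxtshift, clamped to
 * [rack_persist_min, rack_persist_max].
 */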
4502 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack)
4508 t = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT) + ((tp->t_rttvar * 4) >> TCP_RTT_SHIFT));
4509 TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
4510 rack_persist_min, rack_persist_max);
4511 if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
4513 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT;
4514 ret_val = (uint32_t)tt;
4519 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack)
4522 * Start the FR timer, we do this based on getting the first one in
4523 * the rc_tmap. Note that if it's NULL we must stop the timer. In all
4524 * events we need to stop the running timer (if it's running) before
4525 * starting the new one.
4527 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse;
4530 int32_t is_tlp_timer = 0;
4531 struct rack_sendmap *rsm;
4533 if (rack->t_timers_stopped) {
4534 /* All timers have been stopped none are to run */
4537 if (rack->rc_in_persist) {
4538 /* We can't start any timer in persists */
4539 return (rack_get_persists_timer_val(tp, rack));
4541 rack->rc_on_min_to = 0;
4542 if ((tp->t_state < TCPS_ESTABLISHED) ||
4543 ((tp->t_flags & TF_SACK_PERMIT) == 0))
4545 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
4546 if ((rsm == NULL) || sup_rack) {
4547 /* Nothing on the send map */
4549 time_since_sent = 0;
4550 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
4552 idx = rsm->r_rtr_cnt - 1;
4553 if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], rack->r_ctl.rc_tlp_rxt_last_time))
4554 tstmp_touse = rsm->r_tim_lastsent[idx];
4556 tstmp_touse = rack->r_ctl.rc_tlp_rxt_last_time;
4557 if (TSTMP_GT(cts, tstmp_touse))
4558 time_since_sent = cts - tstmp_touse;
4560 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) {
4561 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT;
4562 to = TICKS_2_MSEC(tp->t_rxtcur);
4563 if (to > time_since_sent)
4564 to -= time_since_sent;
4566 to = rack->r_ctl.rc_min_to;
4573 if (rsm->r_flags & RACK_ACKED) {
4574 rsm = rack_find_lowest_rsm(rack);
4580 if (rack->sack_attack_disable) {
4582 * We don't want to do
4583 * any TLP's if you are an attacker.
4584 * Though if you are doing what
4585 * is expected you may still have
4586 * SACK-PASSED marks.
4590 /* Convert from ms to usecs */
4591 if ((rsm->r_flags & RACK_SACK_PASSED) || (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
4592 if ((tp->t_flags & TF_SENTFIN) &&
4593 ((tp->snd_max - tp->snd_una) == 1) &&
4594 (rsm->r_flags & RACK_HAS_FIN)) {
4596 * We don't start a rack timer if all we have is a
4601 if ((rack->use_rack_rr == 0) &&
4602 (IN_RECOVERY(tp->t_flags)) &&
4603 (rack->rack_no_prr == 0) &&
4604 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) {
4606 * We are not cheating, in recovery and
4607 * not enough acks yet to get our next
4608 * retransmission out.
4610 * Note that classified attackers do not
4611 * get to use the rack-cheat.
4615 srtt = rack_grab_rtt(tp, rack);
4616 thresh = rack_calc_thresh_rack(rack, srtt, cts);
4617 idx = rsm->r_rtr_cnt - 1;
4618 exp = rsm->r_tim_lastsent[idx] + thresh;
4619 if (SEQ_GEQ(exp, cts)) {
4621 if (to < rack->r_ctl.rc_min_to) {
4622 to = rack->r_ctl.rc_min_to;
4623 if (rack->r_rr_config == 3)
4624 rack->rc_on_min_to = 1;
4627 to = rack->r_ctl.rc_min_to;
4628 if (rack->r_rr_config == 3)
4629 rack->rc_on_min_to = 1;
4632 /* Ok we need to do a TLP not RACK */
4634 if ((rack->rc_tlp_in_progress != 0) &&
4635 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) {
4637 * The previous send was a TLP and we have sent
4638 * N TLP's without sending new data.
4642 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
4644 /* We found no rsm to TLP with. */
4647 if (rsm->r_flags & RACK_HAS_FIN) {
4648 /* If it's a FIN we don't do TLP */
4652 idx = rsm->r_rtr_cnt - 1;
4653 time_since_sent = 0;
4654 if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], rack->r_ctl.rc_tlp_rxt_last_time))
4655 tstmp_touse = rsm->r_tim_lastsent[idx];
4657 tstmp_touse = rack->r_ctl.rc_tlp_rxt_last_time;
4658 if (TSTMP_GT(cts, tstmp_touse))
4659 time_since_sent = cts - tstmp_touse;
4662 srtt_cur = (tp->t_srtt >> TCP_RTT_SHIFT);
4663 srtt = TICKS_2_MSEC(srtt_cur);
4665 srtt = RACK_INITIAL_RTO;
4667 * If the SRTT is not keeping up and the
4668 * rack RTT has spiked we want to use
4669 * the last RTT not the smoothed one.
4671 if (rack_tlp_use_greater && (srtt < rack_grab_rtt(tp, rack)))
4672 srtt = rack_grab_rtt(tp, rack);
4673 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt);
4674 if (thresh > time_since_sent)
4675 to = thresh - time_since_sent;
4677 to = rack->r_ctl.rc_min_to;
4678 rack_log_alt_to_to_cancel(rack,
4680 time_since_sent, /* flex2 */
4681 tstmp_touse, /* flex3 */
4682 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */
4683 rsm->r_tim_lastsent[idx],
4687 if (to > TCPTV_REXMTMAX) {
4689 * If the TLP time works out to larger than the max
4690 * RTO lets not do TLP.. just RTO.
4695 if (is_tlp_timer == 0) {
4696 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK;
4698 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP;
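/*
 * Enter persists: close out any goodput measurement in progress,
 * idle the shared cwnd if one is attached, stamp the time we went
 * idle and cancel the running timers.
 */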
4706 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
4708 if (rack->rc_in_persist == 0) {
4709 if (tp->t_flags & TF_GPUTINPROG) {
4711 * Stop the goodput now, the calling of the
4712 * measurement function clears the flag.
4714 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__);
4716 #ifdef NETFLIX_SHARED_CWND
4717 if (rack->r_ctl.rc_scw) {
4718 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
4719 rack->rack_scwnd_is_idle = 1;
4722 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
4723 if (rack->r_ctl.rc_went_idle_time == 0)
4724 rack->r_ctl.rc_went_idle_time = 1;
4725 rack_timer_cancel(tp, rack, cts, __LINE__);
4727 rack->rc_in_persist = 1;
4732 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
4734 if (rack->rc_inp->inp_in_hpts) {
4735 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
4736 rack->r_ctl.rc_hpts_flags = 0;
4738 #ifdef NETFLIX_SHARED_CWND
4739 if (rack->r_ctl.rc_scw) {
4740 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
4741 rack->rack_scwnd_is_idle = 0;
4744 if (rack->rc_gp_dyn_mul &&
4745 (rack->use_fixed_rate == 0) &&
4746 (rack->rc_always_pace)) {
4748 * Do we count this as if a probe-rtt just
4751 uint32_t time_idle, idle_min;
4753 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time;
4754 idle_min = rack_min_probertt_hold;
4755 if (rack_probertt_gpsrtt_cnt_div) {
4757 extra = (uint64_t)rack->r_ctl.rc_gp_srtt *
4758 (uint64_t)rack_probertt_gpsrtt_cnt_mul;
4759 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div;
4760 idle_min += (uint32_t)extra;
4762 if (time_idle >= idle_min) {
4763 /* Yes, we count it as a probe-rtt. */
4766 us_cts = tcp_get_usecs(NULL);
4767 if (rack->in_probe_rtt == 0) {
4768 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
4769 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
4770 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
4771 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
4773 rack_exit_probertt(rack, us_cts);
4778 rack->rc_in_persist = 0;
4779 rack->r_ctl.rc_went_idle_time = 0;
4781 rack->r_ctl.rc_agg_delayed = 0;
4784 rack->r_ctl.rc_agg_early = 0;
4788 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts,
4789 struct hpts_diag *diag, struct timeval *tv)
4791 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
4792 union tcp_log_stackspecific log;
4794 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
4795 log.u_bbr.flex1 = diag->p_nxt_slot;
4796 log.u_bbr.flex2 = diag->p_cur_slot;
4797 log.u_bbr.flex3 = diag->slot_req;
4798 log.u_bbr.flex4 = diag->inp_hptsslot;
4799 log.u_bbr.flex5 = diag->slot_remaining;
4800 log.u_bbr.flex6 = diag->need_new_to;
4801 log.u_bbr.flex7 = diag->p_hpts_active;
4802 log.u_bbr.flex8 = diag->p_on_min_sleep;
4803 /* Hijack other fields as needed */
4804 log.u_bbr.epoch = diag->have_slept;
4805 log.u_bbr.lt_epoch = diag->yet_to_sleep;
4806 log.u_bbr.pkts_out = diag->co_ret;
4807 log.u_bbr.applimited = diag->hpts_sleep_time;
4808 log.u_bbr.delivered = diag->p_prev_slot;
4809 log.u_bbr.inflight = diag->p_runningtick;
4810 log.u_bbr.bw_inuse = diag->wheel_tick;
4811 log.u_bbr.rttProp = diag->wheel_cts;
4812 log.u_bbr.timeStamp = cts;
4813 log.u_bbr.delRate = diag->maxticks;
4814 log.u_bbr.cur_del_rate = diag->p_curtick;
4815 log.u_bbr.cur_del_rate <<= 32;
4816 log.u_bbr.cur_del_rate |= diag->p_lasttick;
4817 TCP_LOG_EVENTP(rack->rc_tp, NULL,
4818 &rack->rc_inp->inp_socket->so_rcv,
4819 &rack->rc_inp->inp_socket->so_snd,
4820 BBR_LOG_HPTSDIAG, 0,
4821 0, &log, false, tv);
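/*
 * Arm the pacer/timer pair: apply any early/late pacing carry-over
 * to the requested slot, pick the timer value to use (rack, tlp,
 * rxt, persist, delayed-ack or keepalive), and insert the
 * connection into the hpts wheel with the mbuf-queue and
 * don't-wake-for-sacks flags set to match.
 */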
4827 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
4828 int32_t slot, uint32_t tot_len_this_send, int sup_rack)
4830 struct hpts_diag diag;
4833 uint32_t delayed_ack = 0;
4834 uint32_t hpts_timeout;
4840 if ((tp->t_state == TCPS_CLOSED) ||
4841 (tp->t_state == TCPS_LISTEN)) {
4844 if (inp->inp_in_hpts) {
4845 /* Already on the pacer */
4848 stopped = rack->rc_tmr_stopped;
4849 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
4850 left = rack->r_ctl.rc_timer_exp - cts;
4852 rack->r_ctl.rc_timer_exp = 0;
4853 rack->r_ctl.rc_hpts_flags = 0;
4854 us_cts = tcp_get_usecs(&tv);
4855 /* Now early/late accounting */
4856 if (rack->r_early) {
4858 * We have an early carry-over set;
4859 * we can always add more time so we
4860 * can always make this compensation.
4862 slot += rack->r_ctl.rc_agg_early;
4864 rack->r_ctl.rc_agg_early = 0;
4868 * This is harder, we can
4869 * compensate some but it
4870 * really depends on what
4871 * the current pacing time is.
4873 if (rack->r_ctl.rc_agg_delayed >= slot) {
4875 * We can't compensate for it all.
4876 * And we have to have some time
4877 * on the clock. We always have a min
4878 * 10 slots (10 x 10 i.e. 100 usecs).
4880 if (slot <= HPTS_TICKS_PER_USEC) {
4882 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_USEC - slot);
4883 slot = HPTS_TICKS_PER_USEC;
4885 /* We take off some */
4886 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_USEC);
4887 slot = HPTS_TICKS_PER_USEC;
4891 slot -= rack->r_ctl.rc_agg_delayed;
4892 rack->r_ctl.rc_agg_delayed = 0;
4893 /* Make sure we have 100 microseconds at minimum */
4894 if (slot < HPTS_TICKS_PER_USEC) {
4895 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_USEC - slot;
4896 slot = HPTS_TICKS_PER_USEC;
4898 if (rack->r_ctl.rc_agg_delayed == 0)
4903 /* We are pacing too */
4904 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
4906 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack);
4907 #ifdef NETFLIX_EXP_DETECTION
4908 if (rack->sack_attack_disable &&
4909 (slot < tcp_sad_pacing_interval)) {
4911 * We have a potential attacker on
4912 * the line. We have possibly some
4913 * (or now) pacing time set. We want to
4914 * slow down the processing of sacks by some
4915 * amount (if it is an attacker). Set the default
4916 * slot for attackers in place (unless the original
4917 * interval is longer). It's stored in
4918 * micro-seconds, so let's convert to msecs.
4920 slot = tcp_sad_pacing_interval;
4923 if (tp->t_flags & TF_DELACK) {
4924 delayed_ack = TICKS_2_MSEC(tcp_delacktime);
4925 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK;
4927 if (delayed_ack && ((hpts_timeout == 0) ||
4928 (delayed_ack < hpts_timeout)))
4929 hpts_timeout = delayed_ack;
4931 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
4933 * If no timers are going to run and we will fall off the hptsi
4934 * wheel, we resort to a keep-alive timer if it's configured.
4936 if ((hpts_timeout == 0) &&
4938 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
4939 (tp->t_state <= TCPS_CLOSING)) {
4941 * Ok we have no timer (persists, rack, tlp, rxt or
4942 * del-ack), we don't have segments being paced. So
4943 * all that is left is the keepalive timer.
4945 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
4946 /* Get the established keep-alive time */
4947 hpts_timeout = TP_KEEPIDLE(tp);
4949 /* Get the initial setup keep-alive time */
4950 hpts_timeout = TP_KEEPINIT(tp);
4952 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP;
4953 if (rack->in_probe_rtt) {
4955 * We want to instead not wake up a long time from
4956 * now but to wake up about the time we would
4957 * exit probe-rtt and initiate a keep-alive ack.
4958 * This will get us out of probe-rtt and update
4961 hpts_timeout = (rack_min_probertt_hold / HPTS_USEC_IN_MSEC);
4965 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) ==
4966 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) {
4968 * RACK, TLP, persists and RXT timers all are restartable
4969 * based on actions input, i.e. we received a packet (ack
4970 * or sack) and that changes things (rw, or snd_una etc).
4971 * Thus we can restart them with a new value. For
4972 * keep-alive, delayed_ack we keep track of what was left
4973 * and restart the timer with a smaller value.
4975 if (left < hpts_timeout)
4976 hpts_timeout = left;
4980 * Hack alert for now we can't time-out over 2,147,483
4981 * seconds (a bit more than 596 hours), which is probably ok
4984 if (hpts_timeout > 0x7ffffffe)
4985 hpts_timeout = 0x7ffffffe;
4986 rack->r_ctl.rc_timer_exp = cts + hpts_timeout;
4988 if ((rack->rc_gp_filled == 0) &&
4989 (hpts_timeout < slot) &&
4990 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) {
4992 * We have no good estimate yet for the
4993 * old clunky burst mitigation or the
4994 * real pacing. And the tlp or rxt is smaller
4995 * than the pacing calculation. Let's not
4996 * pace that long since we know the calculation
4997 * so far is not accurate.
4999 slot = hpts_timeout;
5001 rack->r_ctl.last_pacing_time = slot;
5003 rack->r_ctl.rc_last_output_to = us_cts + slot;
5004 if (rack->rc_always_pace || rack->r_mbuf_queue) {
5005 if ((rack->rc_gp_filled == 0) ||
5006 rack->pacing_longer_than_rtt) {
5007 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
5009 inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
5010 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) &&
5011 (rack->r_rr_config != 3))
5012 inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
5014 inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
5017 if ((rack->use_rack_rr) &&
5018 (rack->r_rr_config < 2) &&
5019 ((hpts_timeout) && ((hpts_timeout * HPTS_USEC_IN_MSEC) < slot))) {
5021 * Arrange for the hpts to kick back in after the
5022 * t-o if the t-o does not cause a send.
5024 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout),
5026 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5027 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
5029 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot),
5031 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5032 rack_log_to_start(rack, cts, hpts_timeout, slot, 1);
5034 } else if (hpts_timeout) {
5035 if (rack->rc_always_pace || rack->r_mbuf_queue) {
5036 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) {
5037 /* For a rack timer, don't wake us */
5038 inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
5039 if (rack->r_rr_config != 3)
5040 inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
5042 inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
5044 /* All other timers wake us up */
5045 inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
5046 inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
5049 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout),
5051 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5052 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
5054 /* No timer starting */
5056 if (SEQ_GT(tp->snd_max, tp->snd_una)) {
5057 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?",
5058 tp, rack, tot_len_this_send, cts, slot, hpts_timeout);
5062 rack->rc_tmr_stopped = 0;
5064 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv);
5068 * RACK timer, here we simply do logging and housekeeping.
5069 * The normal rack_output() function will call the
5070 * appropriate thing to check if we need to do a RACK retransmit.
5071 * We return 1, saying don't proceed with rack_output only
5072 * when all timers have been stopped (destroyed PCB?).
5075 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5078 * This timer simply provides an internal trigger to send out data.
5079 * The check_recovery_mode call will see if there are needed
5080 * retransmissions, if so we will enter fast-recovery. The output
5081 * call may or may not do the same thing depending on sysctl
5084 struct rack_sendmap *rsm;
5087 if (tp->t_timers->tt_flags & TT_STOPPED) {
5090 recovery = IN_RECOVERY(tp->t_flags);
5091 counter_u64_add(rack_to_tot, 1);
5092 if (rack->r_state && (rack->r_state != tp->t_state))
5093 rack_set_state(tp, rack);
5094 rack->rc_on_min_to = 0;
5095 rsm = rack_check_recovery_mode(tp, cts);
5096 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm);
5100 rack->r_ctl.rc_resend = rsm;
5101 if (rack->use_rack_rr) {
5103 * Don't accumulate extra pacing delay;
5104 * we are allowing the rack timer to
5105 * override pacing, i.e. rrr takes precedence
5106 * if the pacing interval is longer than the rrr
5107 * time (in other words we get the min pacing
5108 * time versus rrr pacing time).
5110 rack->r_timer_override = 1;
5111 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
5113 rtt = rack->rc_rack_rtt;
5116 if (rack->rack_no_prr == 0) {
5117 if ((recovery == 0) &&
5118 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) {
5120 * The rack-timeout that enters us into recovery
5121 * will force out one MSS and set us up so that we
5122 * can do one more send in 2*rtt (transitioning the
5123 * rack timeout into a rack-tlp).
5125 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
5126 rack->r_timer_override = 1;
5127 rack_log_to_prr(rack, 3, 0);
5128 } else if ((rack->r_ctl.rc_prr_sndcnt < (rsm->r_end - rsm->r_start)) &&
5129 rack->use_rack_rr) {
5131 * When a rack timer goes, if the rack rr is
5132 * on, arrange it so we can send a full segment
5133 * overriding prr (though we pay a price for this
5134 * for future new sends).
5136 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
5137 rack_log_to_prr(rack, 4, 0);
5141 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK;
5143 /* restart a timer and return 1 */
5144 rack_start_hpts_timer(rack, tp, cts,
5151 static __inline void
5152 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm,
5153 struct rack_sendmap *rsm, uint32_t start)
5157 nrsm->r_start = start;
5158 nrsm->r_end = rsm->r_end;
5159 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
5160 nrsm->r_flags = rsm->r_flags;
5161 nrsm->r_dupack = rsm->r_dupack;
5162 nrsm->usec_orig_send = rsm->usec_orig_send;
5163 nrsm->r_rtr_bytes = 0;
5164 rsm->r_end = nrsm->r_start;
5165 nrsm->r_just_ret = rsm->r_just_ret;
5166 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
5167 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
5171 static struct rack_sendmap *
5172 rack_merge_rsm(struct tcp_rack *rack,
5173 struct rack_sendmap *l_rsm,
5174 struct rack_sendmap *r_rsm)
5177 * We are merging two ack'd RSM's,
5178 * the l_rsm is on the left (lower seq
5179 * values) and the r_rsm is on the right
5180 * (higher seq value). The simplest way
5181 * to merge these is to move the right
5182 * one into the left. I don't think there
5183 * is any reason we need to try to find
5184 * the oldest (or last oldest retransmitted).
5186 struct rack_sendmap *rm;
5188 l_rsm->r_end = r_rsm->r_end;
5189 if (l_rsm->r_dupack < r_rsm->r_dupack)
5190 l_rsm->r_dupack = r_rsm->r_dupack;
5191 if (r_rsm->r_rtr_bytes)
5192 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes;
5193 if (r_rsm->r_in_tmap) {
5194 /* This really should not happen */
5195 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext);
5196 r_rsm->r_in_tmap = 0;
5200 if (r_rsm->r_flags & RACK_HAS_FIN)
5201 l_rsm->r_flags |= RACK_HAS_FIN;
5202 if (r_rsm->r_flags & RACK_TLP)
5203 l_rsm->r_flags |= RACK_TLP;
5204 if (r_rsm->r_flags & RACK_RWND_COLLAPSED)
5205 l_rsm->r_flags |= RACK_RWND_COLLAPSED;
5206 if ((r_rsm->r_flags & RACK_APP_LIMITED) &&
5207 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) {
5209 * If both are app-limited then let the
5210 * free lower the count. If right is app
5211 * limited and left is not, transfer.
5213 l_rsm->r_flags |= RACK_APP_LIMITED;
5214 r_rsm->r_flags &= ~RACK_APP_LIMITED;
5215 if (r_rsm == rack->r_ctl.rc_first_appl)
5216 rack->r_ctl.rc_first_appl = l_rsm;
5218 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm);
5221 panic("removing head in rack:%p rsm:%p rm:%p",
5225 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) {
5226 /* Transfer the split limit to the map we free */
5227 r_rsm->r_limit_type = l_rsm->r_limit_type;
5228 l_rsm->r_limit_type = 0;
5230 rack_free(rack, r_rsm);
 * TLP Timer, here we simply set up what segment we want to
 * have the TLP expire on, the normal rack_output() will then
 * send it out.
 *
 * We return 1, saying don't proceed with rack_output only
 * when all timers have been stopped (destroyed PCB?).
5243 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5248 struct rack_sendmap *rsm = NULL;
5249 struct rack_sendmap *insret;
5251 uint32_t amm, old_prr_snd = 0;
5252 uint32_t out, avail;
5253 int collapsed_win = 0;
5255 if (tp->t_timers->tt_flags & TT_STOPPED) {
5258 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
/* It's not time yet */
5262 if (ctf_progress_timeout_check(tp, true)) {
5263 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
5264 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
5268 * A TLP timer has expired. We have been idle for 2 rtts. So we now
5269 * need to figure out how to force a full MSS segment out.
5271 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL);
5272 counter_u64_add(rack_tlp_tot, 1);
5273 if (rack->r_state && (rack->r_state != tp->t_state))
5274 rack_set_state(tp, rack);
5275 so = tp->t_inpcb->inp_socket;
5277 if (rack->rc_inp->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) {
 * For hardware TLS we do *not* want to send
 * new data, let's instead just do a retransmission.
5285 avail = sbavail(&so->so_snd);
5286 out = tp->snd_max - tp->snd_una;
5287 if (out > tp->snd_wnd) {
5288 /* special case, we need a retransmission */
 * Check our send oldest always settings, and if
 * there is an oldest to send, jump to the need_retran.
5296 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0))
5300 /* New data is available */
5302 if (amm > ctf_fixed_maxseg(tp)) {
5303 amm = ctf_fixed_maxseg(tp);
5304 if ((amm + out) > tp->snd_wnd) {
5305 /* We are rwnd limited */
5308 } else if (amm < ctf_fixed_maxseg(tp)) {
/* not enough to fill an MTU */
5312 if (IN_RECOVERY(tp->t_flags)) {
5314 if (rack->rack_no_prr == 0) {
5315 old_prr_snd = rack->r_ctl.rc_prr_sndcnt;
5316 if (out + amm <= tp->snd_wnd) {
5317 rack->r_ctl.rc_prr_sndcnt = amm;
5318 rack_log_to_prr(rack, 4, 0);
5323 /* Set the send-new override */
5324 if (out + amm <= tp->snd_wnd)
5325 rack->r_ctl.rc_tlp_new_data = amm;
5329 rack->r_ctl.rc_tlpsend = NULL;
5330 counter_u64_add(rack_tlp_newdata, 1);
5335 * Ok we need to arrange the last un-acked segment to be re-sent, or
5336 * optionally the first un-acked segment.
5338 if (collapsed_win == 0) {
5339 if (rack_always_send_oldest)
5340 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5342 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
5343 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) {
5344 rsm = rack_find_high_nonack(rack, rsm);
5348 counter_u64_add(rack_tlp_does_nada, 1);
5350 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
 * We must find the last segment
 * that was acceptable to the client.
5359 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
5360 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) {
/* None? If so send the first */
5367 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
5369 counter_u64_add(rack_tlp_does_nada, 1);
5371 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
5377 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) {
 * We need to split this, the last segment, in two.
5381 struct rack_sendmap *nrsm;
5384 nrsm = rack_alloc_full_limit(rack);
5387 * No memory to split, we will just exit and punt
5388 * off to the RXT timer.
5390 counter_u64_add(rack_tlp_does_nada, 1);
5393 rack_clone_rsm(rack, nrsm, rsm,
5394 (rsm->r_end - ctf_fixed_maxseg(tp)));
5395 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
5397 if (insret != NULL) {
5398 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
5399 nrsm, insret, rack, rsm);
5402 if (rsm->r_in_tmap) {
5403 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
5404 nrsm->r_in_tmap = 1;
5406 rsm->r_flags &= (~RACK_HAS_FIN);
5409 rack->r_ctl.rc_tlpsend = rsm;
5411 rack->r_timer_override = 1;
5412 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
5415 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
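/*
 * Illustrative sketch (not compiled) of the TLP decision above: prefer
 * one MSS of new data when it exists and fits the peer's window,
 * otherwise fall back to re-sending old data. All names here are
 * hypothetical simplifications of the logic in rack_timeout_tlp().
 */
#if 0
static int			/* 1 = send new data, 0 = retransmit */
ex_tlp_choose(uint32_t avail, uint32_t out, uint32_t snd_wnd, uint32_t maxseg)
{
	uint32_t amm;

	if (avail <= out)
		return (0);		/* nothing new to send */
	amm = avail - out;
	if (amm > maxseg)
		amm = maxseg;		/* TLP probes with at most one MSS */
	if (amm < maxseg)
		return (0);		/* not enough to fill an MTU */
	if (out + amm > snd_wnd)
		return (0);		/* receive-window limited */
	return (1);
}
#endif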
 * Delayed ack Timer, here we simply need to set the
 * ACK_NOW flag and remove the DELACK flag. From there
 * the output routine will send the ack out.
 *
 * We only return 1, saying don't proceed, if all timers
 * are stopped (destroyed PCB?).
5428 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5430 if (tp->t_timers->tt_flags & TT_STOPPED) {
5433 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL);
5434 tp->t_flags &= ~TF_DELACK;
5435 tp->t_flags |= TF_ACKNOW;
5436 KMOD_TCPSTAT_INC(tcps_delack);
5437 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
 * Persist timer, here we simply send the
 * same thing as a keepalive will,
 * the one byte send.
 *
 * We only return 1, saying don't proceed, if all timers
 * are stopped (destroyed PCB?).
5450 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5452 struct tcptemp *t_template;
5458 if (tp->t_timers->tt_flags & TT_STOPPED) {
5461 if (rack->rc_in_persist == 0)
5463 if (ctf_progress_timeout_check(tp, false)) {
5464 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
5465 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
5466 tcp_set_inp_to_drop(inp, ETIMEDOUT);
5469 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
 * Persistence timer into zero window. Force a byte to be output, if
 * possible.
5474 KMOD_TCPSTAT_INC(tcps_persisttimeo);
5476 * Hack: if the peer is dead/unreachable, we do not time out if the
5477 * window is closed. After a full backoff, drop the connection if
5478 * the idle time (no responses to probes) reaches the maximum
5479 * backoff that we would use if retransmitting.
5481 if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
5482 (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
5483 ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
5484 KMOD_TCPSTAT_INC(tcps_persistdrop);
5486 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
5487 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
5490 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) &&
5491 tp->snd_una == tp->snd_max)
5492 rack_exit_persist(tp, rack, cts);
5493 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT;
5495 * If the user has closed the socket then drop a persisting
5496 * connection after a much reduced timeout.
5498 if (tp->t_state > TCPS_CLOSE_WAIT &&
5499 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
5501 KMOD_TCPSTAT_INC(tcps_persistdrop);
5502 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
5503 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
5506 t_template = tcpip_maketemplate(rack->rc_inp);
5508 /* only set it if we were answered */
5509 if (rack->forced_ack == 0) {
5510 rack->forced_ack = 1;
5511 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
5513 tcp_respond(tp, t_template->tt_ipgen,
5514 &t_template->tt_t, (struct mbuf *)NULL,
5515 tp->rcv_nxt, tp->snd_una - 1, 0);
5516 /* This sends an ack */
5517 if (tp->t_flags & TF_DELACK)
5518 tp->t_flags &= ~TF_DELACK;
5519 free(t_template, M_TEMP);
5521 if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
5524 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL);
5525 rack_start_hpts_timer(rack, tp, cts,
 * If a keepalive goes off, we had no other timers
 * happening. We always return 1 here since this
 * routine either drops the connection or sends
 * out a response segment.
5537 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5539 struct tcptemp *t_template;
5542 if (tp->t_timers->tt_flags & TT_STOPPED) {
5545 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
5547 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL);
5549 * Keep-alive timer went off; send something or drop connection if
5550 * idle for too long.
5552 KMOD_TCPSTAT_INC(tcps_keeptimeo);
5553 if (tp->t_state < TCPS_ESTABLISHED)
5555 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
5556 tp->t_state <= TCPS_CLOSING) {
5557 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
5560 * Send a packet designed to force a response if the peer is
5561 * up and reachable: either an ACK if the connection is
5562 * still alive, or an RST if the peer has closed the
5563 * connection due to timeout or reboot. Using sequence
5564 * number tp->snd_una-1 causes the transmitted zero-length
5565 * segment to lie outside the receive window; by the
 * protocol spec, this requires the correspondent TCP to
 * respond.
5569 KMOD_TCPSTAT_INC(tcps_keepprobe);
5570 t_template = tcpip_maketemplate(inp);
5572 if (rack->forced_ack == 0) {
5573 rack->forced_ack = 1;
5574 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
5576 tcp_respond(tp, t_template->tt_ipgen,
5577 &t_template->tt_t, (struct mbuf *)NULL,
5578 tp->rcv_nxt, tp->snd_una - 1, 0);
5579 free(t_template, M_TEMP);
5582 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
5585 KMOD_TCPSTAT_INC(tcps_keepdrops);
5586 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
5587 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
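/*
 * Illustrative sketch (not compiled): why the probe above uses
 * snd_una - 1. A zero-length segment whose sequence sits just left of
 * rcv_nxt is outside the peer's receive window, so a conforming peer
 * must reply with an ACK carrying its current state.
 */
#if 0
static int
ex_in_rcv_window(uint32_t seq, uint32_t rcv_nxt, uint32_t rcv_wnd)
{
	/* Acceptable zero-length segment: rcv_nxt <= seq < rcv_nxt + rcv_wnd */
	return (SEQ_GEQ(seq, rcv_nxt) && SEQ_LT(seq, rcv_nxt + rcv_wnd));
}
/* ex_in_rcv_window(snd_una - 1, ...) is false, forcing a duplicate ACK. */
#endif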
 * Retransmit helper function, clear up all the ack
 * flags and take care of important bookkeeping.
5596 rack_remxt_tmr(struct tcpcb *tp)
 * The retransmit timer went off, all sack'd blocks must be
 * un-acked.
5602 struct rack_sendmap *rsm, *trsm = NULL;
5603 struct tcp_rack *rack;
5606 rack = (struct tcp_rack *)tp->t_fb_ptr;
5607 rack_timer_cancel(tp, rack, tcp_ts_getticks(), __LINE__);
5608 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL);
5609 if (rack->r_state && (rack->r_state != tp->t_state))
5610 rack_set_state(tp, rack);
5612 * Ideally we would like to be able to
5613 * mark SACK-PASS on anything not acked here.
5614 * However, if we do that we would burst out
5615 * all that data 1ms apart. This would be unwise,
5616 * so for now we will just let the normal rxt timer
5617 * and tlp timer take care of it.
5619 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
5620 if (rsm->r_flags & RACK_ACKED) {
5623 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
5624 if (rsm->r_in_tmap == 0) {
5625 /* We must re-add it back to the tlist */
5627 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
5629 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
5635 if (rsm->r_flags & RACK_ACKED)
5636 rsm->r_flags |= RACK_WAS_ACKED;
5637 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS);
5639 /* Clear the count (we just un-acked them) */
5640 rack->r_ctl.rc_sacked = 0;
5641 rack->r_ctl.rc_agg_delayed = 0;
5643 rack->r_ctl.rc_agg_early = 0;
5645 /* Clear the tlp rtx mark */
5646 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
5647 rack->r_ctl.rc_prr_sndcnt = 0;
5648 rack_log_to_prr(rack, 6, 0);
5649 rack->r_timer_override = 1;
5653 rack_cc_conn_init(struct tcpcb *tp)
5655 struct tcp_rack *rack;
5658 rack = (struct tcp_rack *)tp->t_fb_ptr;
5661 * We want a chance to stay in slowstart as
5662 * we create a connection. TCP spec says that
5663 * initially ssthresh is infinite. For our
5664 * purposes that is the snd_wnd.
5666 if (tp->snd_ssthresh < tp->snd_wnd) {
5667 tp->snd_ssthresh = tp->snd_wnd;
 * We also want to assure an IW worth of
 * data can get in flight.
5673 if (rc_init_window(rack) < tp->snd_cwnd)
5674 tp->snd_cwnd = rc_init_window(rack);
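/*
 * Illustrative sketch (not compiled) of the two clamps above, with
 * rc_init_window() stood in for by a fixed 10 * MSS initial window
 * (a hypothetical value used here for illustration only).
 */
#if 0
static void
ex_cc_conn_init(uint32_t *ssthresh, uint32_t *cwnd, uint32_t snd_wnd,
    uint32_t maxseg)
{
	uint32_t iw = 10 * maxseg;	/* hypothetical initial window */

	if (*ssthresh < snd_wnd)
		*ssthresh = snd_wnd;	/* "infinite" ssthresh == snd_wnd */
	if (iw < *cwnd)
		*cwnd = iw;		/* don't start above an IW of data */
}
#endif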
 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
 * we will set up to retransmit the lowest seq number outstanding.
5682 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5690 if (tp->t_timers->tt_flags & TT_STOPPED) {
5693 if (ctf_progress_timeout_check(tp, false)) {
5694 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
5695 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
5696 tcp_set_inp_to_drop(inp, ETIMEDOUT);
5699 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT;
5700 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
5701 (tp->snd_una == tp->snd_max)) {
5702 /* Nothing outstanding .. nothing to do */
5706 * Retransmission timer went off. Message has not been acked within
5707 * retransmit interval. Back off to a longer retransmit interval
5708 * and retransmit one segment.
5711 if ((rack->r_ctl.rc_resend == NULL) ||
5712 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) {
 * If the rwnd collapsed on
 * the one we are retransmitting
 * it does not count against the
 * retransmit count.
5721 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) {
5722 tp->t_rxtshift = TCP_MAXRXTSHIFT;
5723 KMOD_TCPSTAT_INC(tcps_timeoutdrop);
5725 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
5726 tcp_set_inp_to_drop(rack->rc_inp,
5727 (tp->t_softerror ? (uint16_t) tp->t_softerror : ETIMEDOUT));
5730 if (tp->t_state == TCPS_SYN_SENT) {
5732 * If the SYN was retransmitted, indicate CWND to be limited
5733 * to 1 segment in cc_conn_init().
5736 } else if (tp->t_rxtshift == 1) {
5738 * first retransmit; record ssthresh and cwnd so they can be
5739 * recovered if this turns out to be a "bad" retransmit. A
5740 * retransmit is considered "bad" if an ACK for this segment
5741 * is received within RTT/2 interval; the assumption here is
5742 * that the ACK was already in flight. See "On Estimating
 * End-to-End Network Path Properties" by Allman and Paxson
 * for more details.
5746 tp->snd_cwnd_prev = tp->snd_cwnd;
5747 tp->snd_ssthresh_prev = tp->snd_ssthresh;
5748 tp->snd_recover_prev = tp->snd_recover;
5749 if (IN_FASTRECOVERY(tp->t_flags))
5750 tp->t_flags |= TF_WASFRECOVERY;
5752 tp->t_flags &= ~TF_WASFRECOVERY;
5753 if (IN_CONGRECOVERY(tp->t_flags))
5754 tp->t_flags |= TF_WASCRECOVERY;
5756 tp->t_flags &= ~TF_WASCRECOVERY;
5757 tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
5758 tp->t_flags |= TF_PREVVALID;
5760 tp->t_flags &= ~TF_PREVVALID;
5761 KMOD_TCPSTAT_INC(tcps_rexmttimeo);
5762 if ((tp->t_state == TCPS_SYN_SENT) ||
5763 (tp->t_state == TCPS_SYN_RECEIVED))
5764 rexmt = MSEC_2_TICKS(RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]);
5766 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
5767 TCPT_RANGESET(tp->t_rxtcur, rexmt,
5768 max(MSEC_2_TICKS(rack_rto_min), rexmt),
5769 MSEC_2_TICKS(rack_rto_max));
 * We enter the path for PLMTUD if the connection is established or,
 * if the connection is in FIN_WAIT_1 status; the reason for the latter
 * is that if the amount of data we send is very small, we could send
 * it in a couple of packets and proceed straight to FIN. In that case
 * we won't catch the ESTABLISHED state.
5778 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false;
5782 if (((V_tcp_pmtud_blackhole_detect == 1) ||
5783 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) ||
5784 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) &&
5785 ((tp->t_state == TCPS_ESTABLISHED) ||
5786 (tp->t_state == TCPS_FIN_WAIT_1))) {
 * The idea here is that at each stage of the MTU probe (usually,
 * 1448 -> 1188 -> 524) we should be given 2 chances to recover
 * before further clamping down. 'tp->t_rxtshift % 2 == 0'
 * should take care of that.
5794 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) ==
5795 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) &&
5796 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
5797 tp->t_rxtshift % 2 == 0)) {
 * Enter Path MTU Black-hole Detection mechanism: -
 * Disable Path MTU Discovery (IP "DF" bit). -
 * Reduce MTU to lower value than what we negotiated
 * with peer.
5804 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
5805 /* Record that we may have found a black hole. */
5806 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
5807 /* Keep track of previous MSS. */
5808 tp->t_pmtud_saved_maxseg = tp->t_maxseg;
5812 * Reduce the MSS to blackhole value or to the
5813 * default in an attempt to retransmit.
5817 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
5818 /* Use the sysctl tuneable blackhole MSS. */
5819 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
5820 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
5821 } else if (isipv6) {
5822 /* Use the default MSS. */
5823 tp->t_maxseg = V_tcp_v6mssdflt;
 * Disable Path MTU Discovery when we switch
 * to minmss.
5828 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
5829 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
5832 #if defined(INET6) && defined(INET)
5836 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
5837 /* Use the sysctl tuneable blackhole MSS. */
5838 tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
5839 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
5841 /* Use the default MSS. */
5842 tp->t_maxseg = V_tcp_mssdflt;
 * Disable Path MTU Discovery when we switch
 * to minmss.
5847 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
5848 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
5853 * If further retransmissions are still unsuccessful
5854 * with a lowered MTU, maybe this isn't a blackhole
5855 * and we restore the previous MSS and blackhole
5856 * detection flags. The limit '6' is determined by
5857 * giving each probe stage (1448, 1188, 524) 2
5858 * chances to recover.
5860 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
5861 (tp->t_rxtshift >= 6)) {
5862 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
5863 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
5864 tp->t_maxseg = tp->t_pmtud_saved_maxseg;
5865 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed);
5870 * If we backed off this far, our srtt estimate is probably bogus.
5871 * Clobber it so we'll take the next rtt measurement as our srtt;
 * move the current srtt into rttvar to keep the current retransmit
 * times until then.
5875 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
5877 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
5878 in6_losing(tp->t_inpcb);
5881 in_losing(tp->t_inpcb);
5882 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
5885 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
5886 tp->snd_recover = tp->snd_max;
5887 tp->t_flags |= TF_ACKNOW;
5889 rack_cong_signal(tp, NULL, CC_RTO);
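/*
 * Illustrative sketch (not compiled) of the backoff computation above:
 * an exponentially growing retransmit timeout clamped to a [min, max]
 * range the way TCPT_RANGESET() does. The local ex_backoff[] mirrors
 * the shape of tcp_backoff[] (powers of two, capped) but is only an
 * illustrative copy.
 */
#if 0
static uint32_t
ex_rto_backoff(uint32_t base_rto, int rxtshift, uint32_t rto_min,
    uint32_t rto_max)
{
	static const int ex_backoff[] =
	    { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512 };
	uint32_t rexmt;

	rexmt = base_rto * ex_backoff[rxtshift];
	if (rexmt < rto_min)		/* TCPT_RANGESET: clamp low... */
		rexmt = rto_min;
	if (rexmt > rto_max)		/* ...and high */
		rexmt = rto_max;
	return (rexmt);
}
#endif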
5895 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling)
5898 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK);
5903 if (tp->t_state == TCPS_LISTEN) {
5904 /* no timers on listen sockets */
5905 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)
5909 if ((timers & PACE_TMR_RACK) &&
5910 rack->rc_on_min_to) {
 * For the rack timer when we
 * are on a min-timeout (which means rrr_conf = 3)
 * we don't want to check the timer. It may
 * be going off for a pace and that's ok, we
 * want to send the retransmit (if it's ready).
 *
 * If it's on a normal rack timer (non-min) then
 * we will check if it's expired.
5921 goto skip_time_check;
5923 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
5926 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
5928 rack_log_to_processing(rack, cts, ret, 0);
5931 if (hpts_calling == 0) {
 * A user send or queued mbuf (sack) has called us? We
 * return 0 and let the pacing guards
 * decide whether or not a
 * send should happen.
5939 rack_log_to_processing(rack, cts, ret, 0);
 * Ok, our timer went off early and we are not paced; false
 * alarm, go back to sleep.
5947 left = rack->r_ctl.rc_timer_exp - cts;
5948 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left));
5949 rack_log_to_processing(rack, cts, ret, left);
5953 rack->rc_tmr_stopped = 0;
5954 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK;
5955 if (timers & PACE_TMR_DELACK) {
5956 ret = rack_timeout_delack(tp, rack, cts);
5957 } else if (timers & PACE_TMR_RACK) {
5958 rack->r_ctl.rc_tlp_rxt_last_time = cts;
5959 ret = rack_timeout_rack(tp, rack, cts);
5960 } else if (timers & PACE_TMR_TLP) {
5961 rack->r_ctl.rc_tlp_rxt_last_time = cts;
5962 ret = rack_timeout_tlp(tp, rack, cts);
5963 } else if (timers & PACE_TMR_RXT) {
5964 rack->r_ctl.rc_tlp_rxt_last_time = cts;
5965 ret = rack_timeout_rxt(tp, rack, cts);
5966 } else if (timers & PACE_TMR_PERSIT) {
5967 ret = rack_timeout_persist(tp, rack, cts);
5968 } else if (timers & PACE_TMR_KEEP) {
5969 ret = rack_timeout_keepalive(tp, rack, cts);
5971 rack_log_to_processing(rack, cts, ret, timers);
5976 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line)
5979 uint32_t us_cts, flags_on_entry;
5980 uint8_t hpts_removed = 0;
5983 flags_on_entry = rack->r_ctl.rc_hpts_flags;
5984 us_cts = tcp_get_usecs(&tv);
5985 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
5986 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) ||
5987 ((tp->snd_max - tp->snd_una) == 0))) {
5988 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
5990 /* If we were not delayed cancel out the flag. */
5991 if ((tp->snd_max - tp->snd_una) == 0)
5992 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
5993 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
5995 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
5996 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
5997 if (rack->rc_inp->inp_in_hpts &&
5998 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) {
 * Canceling timers when we have no output being
 * paced. We also must remove ourselves from the
 * hpts.
6004 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
6007 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK);
6009 if (hpts_removed == 0)
6010 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
6014 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type)
6020 rack_stopall(struct tcpcb *tp)
6022 struct tcp_rack *rack;
6023 rack = (struct tcp_rack *)tp->t_fb_ptr;
6024 rack->t_timers_stopped = 1;
6029 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta)
6035 rack_timer_active(struct tcpcb *tp, uint32_t timer_type)
6041 rack_stop_all_timers(struct tcpcb *tp)
6043 struct tcp_rack *rack;
6046 * Assure no timers are running.
6048 if (tcp_timer_active(tp, TT_PERSIST)) {
6049 /* We enter in persists, set the flag appropriately */
6050 rack = (struct tcp_rack *)tp->t_fb_ptr;
6051 rack->rc_in_persist = 1;
6053 tcp_timer_suspend(tp, TT_PERSIST);
6054 tcp_timer_suspend(tp, TT_REXMT);
6055 tcp_timer_suspend(tp, TT_KEEP);
6056 tcp_timer_suspend(tp, TT_DELACK);
6060 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
6061 struct rack_sendmap *rsm, uint32_t ts)
6066 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
6068 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
6069 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
6070 rsm->r_flags |= RACK_OVERMAX;
6072 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) {
6073 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
6074 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
6076 idx = rsm->r_rtr_cnt - 1;
6077 rsm->r_tim_lastsent[idx] = ts;
6078 if (rsm->r_flags & RACK_ACKED) {
/* Probably MTU discovery messing with us */
6080 rsm->r_flags &= ~RACK_ACKED;
6081 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
6083 if (rsm->r_in_tmap) {
6084 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
6087 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
6089 if (rsm->r_flags & RACK_SACK_PASSED) {
6090 /* We have retransmitted due to the SACK pass */
6091 rsm->r_flags &= ~RACK_SACK_PASSED;
6092 rsm->r_flags |= RACK_WAS_SACKPASS;
6098 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
6099 struct rack_sendmap *rsm, uint32_t ts, int32_t *lenp)
 * We (re-)transmitted starting at rsm->r_start for some length
 * (possibly less than r_end).
6105 struct rack_sendmap *nrsm, *insret;
6110 c_end = rsm->r_start + len;
6111 if (SEQ_GEQ(c_end, rsm->r_end)) {
 * We retransmitted the whole piece, or more than the whole,
 * slopping into the next rsm.
6116 rack_update_rsm(tp, rack, rsm, ts);
6117 if (c_end == rsm->r_end) {
/* Hangs over the end; return what's left */
6124 act_len = rsm->r_end - rsm->r_start;
6125 *lenp = (len - act_len);
6126 return (rsm->r_end);
6128 /* We don't get out of this block. */
6131 * Here we retransmitted less than the whole thing which means we
6132 * have to split this into what was transmitted and what was not.
6134 nrsm = rack_alloc_full_limit(rack);
 * We can't get memory, so let's not proceed.
6143 * So here we are going to take the original rsm and make it what we
6144 * retransmitted. nrsm will be the tail portion we did not
6145 * retransmit. For example say the chunk was 1, 11 (10 bytes). And
6146 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to
6147 * 1, 6 and the new piece will be 6, 11.
6149 rack_clone_rsm(rack, nrsm, rsm, c_end);
6151 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
6152 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
6154 if (insret != NULL) {
6155 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
6156 nrsm, insret, rack, rsm);
6159 if (rsm->r_in_tmap) {
6160 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
6161 nrsm->r_in_tmap = 1;
6163 rsm->r_flags &= (~RACK_HAS_FIN);
6164 rack_update_rsm(tp, rack, rsm, ts);
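/*
 * Illustrative sketch (not compiled) of the arithmetic above, using
 * the hypothetical ex_seg/ex_split helpers sketched earlier: a chunk
 * [1,11) from which 5 bytes were retransmitted becomes [1,6) (the
 * retransmitted front, kept in place) and [6,11) (the untouched tail,
 * newly inserted).
 */
#if 0
static void
ex_update_entry(struct ex_seg *seg, struct ex_seg *tail, uint32_t len)
{
	uint32_t c_end = seg->r_start + len;	/* end of what was re-sent */

	/* Caller guarantees c_end < seg->r_end here (partial resend). */
	ex_split(seg, tail, c_end);
}
#endif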
6171 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
6172 uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts,
6173 uint8_t pass, struct rack_sendmap *hintrsm, uint32_t us_cts)
6175 struct tcp_rack *rack;
6176 struct rack_sendmap *rsm, *nrsm, *insret, fe;
6177 register uint32_t snd_max, snd_una;
 * Add to the RACK log of packets in flight or retransmitted. If
 * there is a TS option we will use the TS echoed, if not we will
 * grab a TS.
 *
 * Retransmissions will increment the count and move the ts to its
 * proper place. Note that if options do not include TS's then we
 * won't be able to effectively use the ACK for an RTT on a retran.
 *
 * Notes about r_start and r_end. Let's consider a send starting at
 * sequence 1 for 10 bytes. In such an example the r_start would be
 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
 * This means that r_end is actually the first sequence for the next
 * send.
 *
 * If err is set what do we do XXXrrs? should we not add the thing?
 * -- i.e. return if err != 0 or should we pretend we sent it? --
 * i.e. proceed with add ** do this for now.
6200 INP_WLOCK_ASSERT(tp->t_inpcb);
6203 * We don't log errors -- we could but snd_max does not
6204 * advance in this case either.
6208 if (th_flags & TH_RST) {
 * We don't log resets and we return immediately from
 * sending.
6215 rack = (struct tcp_rack *)tp->t_fb_ptr;
6216 snd_una = tp->snd_una;
6217 if (SEQ_LEQ((seq_out + len), snd_una)) {
/* Are we sending an old segment to induce an ack (keep-alive)? */
6221 if (SEQ_LT(seq_out, snd_una)) {
6222 /* huh? should we panic? */
6225 end = seq_out + len;
6227 if (SEQ_GEQ(end, seq_out))
6228 len = end - seq_out;
6232 snd_max = tp->snd_max;
6233 if (th_flags & (TH_SYN | TH_FIN)) {
6235 * The call to rack_log_output is made before bumping
6236 * snd_max. This means we can record one extra byte on a SYN
6237 * or FIN if seq_out is adding more on and a FIN is present
6238 * (and we are not resending).
6240 if ((th_flags & TH_SYN) && (seq_out == tp->iss))
6242 if (th_flags & TH_FIN)
6244 if (SEQ_LT(snd_max, tp->snd_nxt)) {
 * The add/update has not been done for the FIN/SYN
 * yet.
6249 snd_max = tp->snd_nxt;
6253 /* We don't log zero window probes */
6256 rack->r_ctl.rc_time_last_sent = ts;
6257 if (IN_RECOVERY(tp->t_flags)) {
6258 rack->r_ctl.rc_prr_out += len;
6260 /* First question is it a retransmission or new? */
6261 if (seq_out == snd_max) {
6264 rsm = rack_alloc(rack);
 * Hmm out of memory and the tcb got destroyed while
 * we were waiting.
6272 if (th_flags & TH_FIN) {
6273 rsm->r_flags = RACK_HAS_FIN;
6277 rsm->r_tim_lastsent[0] = ts;
6279 rsm->r_rtr_bytes = 0;
6280 rsm->usec_orig_send = us_cts;
6281 if (th_flags & TH_SYN) {
6282 /* The data space is one beyond snd_una */
6283 rsm->r_flags |= RACK_HAS_SIN;
6284 rsm->r_start = seq_out + 1;
6285 rsm->r_end = rsm->r_start + (len - 1);
6288 rsm->r_start = seq_out;
6289 rsm->r_end = rsm->r_start + len;
6292 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
6293 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
6295 if (insret != NULL) {
6296 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
6297 nrsm, insret, rack, rsm);
6300 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
6303 * Special case detection, is there just a single
6304 * packet outstanding when we are not in recovery?
6306 * If this is true mark it so.
6308 if ((IN_RECOVERY(tp->t_flags) == 0) &&
6309 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) {
6310 struct rack_sendmap *prsm;
6312 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
6314 prsm->r_one_out_nr = 1;
 * If we reach here it's a retransmission and we need to find it.
6321 memset(&fe, 0, sizeof(fe));
6323 if (hintrsm && (hintrsm->r_start == seq_out)) {
6327 /* No hints sorry */
6330 if ((rsm) && (rsm->r_start == seq_out)) {
6331 seq_out = rack_update_entry(tp, rack, rsm, ts, &len);
/* Ok it was not the last pointer, go through it the hard way. */
6340 fe.r_start = seq_out;
6341 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
6343 if (rsm->r_start == seq_out) {
6344 seq_out = rack_update_entry(tp, rack, rsm, ts, &len);
6351 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) {
6352 /* Transmitted within this piece */
6354 * Ok we must split off the front and then let the
6355 * update do the rest
6357 nrsm = rack_alloc_full_limit(rack);
6359 rack_update_rsm(tp, rack, rsm, ts);
6363 * copy rsm to nrsm and then trim the front of rsm
6364 * to not include this part.
6366 rack_clone_rsm(rack, nrsm, rsm, seq_out);
6367 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
6369 if (insret != NULL) {
6370 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
6371 nrsm, insret, rack, rsm);
6374 if (rsm->r_in_tmap) {
6375 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
6376 nrsm->r_in_tmap = 1;
6378 rsm->r_flags &= (~RACK_HAS_FIN);
6379 seq_out = rack_update_entry(tp, rack, nrsm, ts, &len);
 * Hmm, not found in map; did they retransmit both old and on into the
 * new?
6390 if (seq_out == tp->snd_max) {
6392 } else if (SEQ_LT(seq_out, tp->snd_max)) {
6394 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n",
6395 seq_out, len, tp->snd_una, tp->snd_max);
6396 printf("Starting Dump of all rack entries\n");
6397 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
6398 printf("rsm:%p start:%u end:%u\n",
6399 rsm, rsm->r_start, rsm->r_end);
6401 printf("Dump complete\n");
6402 panic("seq_out not found rack:%p tp:%p",
6408 * Hmm beyond sndmax? (only if we are using the new rtt-pack
6411 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p",
6412 seq_out, len, tp->snd_max, tp);
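/*
 * Illustrative sketch (not compiled): the SYN case above. A SYN (like
 * a FIN) consumes one sequence number but carries no payload byte, so
 * the data space is recorded as starting one past seq_out, and 'len'
 * (already incremented to account for the SYN) is shortened by one.
 * ex_log_syn and ex_seg are hypothetical illustration helpers.
 */
#if 0
static void
ex_log_syn(struct ex_seg *seg, uint32_t iss, uint32_t len_with_syn)
{
	seg->r_start = iss + 1;		/* data begins after the SYN */
	seg->r_end = seg->r_start + (len_with_syn - 1);
}
#endif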
6418 * Record one of the RTT updates from an ack into
6419 * our sample structure.
6423 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt,
6424 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt)
6426 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
6427 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
6428 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
6430 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
6431 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
6432 rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
6434 if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
6435 if (us_rtt < rack->r_ctl.rc_gp_lowrtt)
6436 rack->r_ctl.rc_gp_lowrtt = us_rtt;
6437 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd)
6438 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
6440 if ((confidence == 1) &&
6442 (rsm->r_just_ret) ||
6443 (rsm->r_one_out_nr &&
6444 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) {
 * If the rsm had a just return
 * hit it then we can't trust the
 * rtt measurement for buffer determination.
 * Note that a confidence of 2 indicates
 * SACK'd, which overrides the r_just_ret or
 * the r_one_out_nr. If it was a CUM-ACK and
 * we had only two outstanding, but get an
 * ack for only 1, then that also lowers our
 * confidence.
6458 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
6459 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) {
6460 if (rack->r_ctl.rack_rs.confidence == 0) {
 * We take anything with no current confidence
 * saved.
6465 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
6466 rack->r_ctl.rack_rs.confidence = confidence;
6467 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
6468 } else if (confidence || rack->r_ctl.rack_rs.confidence) {
6470 * Once we have a confident number,
6471 * we can update it with a smaller
6472 * value since this confident number
6473 * may include the DSACK time until
6474 * the next segment (the second one) arrived.
6476 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
6477 rack->r_ctl.rack_rs.confidence = confidence;
6478 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
6482 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
6483 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
6484 rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
6485 rack->r_ctl.rack_rs.rs_rtt_cnt++;
6489 * Collect new round-trip time estimate
6490 * and update averages and current timeout.
6493 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp)
6496 uint32_t o_srtt, o_var;
6497 int32_t hrtt_up = 0;
6500 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY)
6501 /* No valid sample */
6503 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) {
6504 /* We are to use the lowest RTT seen in a single ack */
6505 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
6506 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) {
6507 /* We are to use the highest RTT seen in a single ack */
6508 rtt = rack->r_ctl.rack_rs.rs_rtt_highest;
6509 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) {
6510 /* We are to use the average RTT seen in a single ack */
6511 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot /
6512 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt);
6515 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method);
6521 if (rack->rc_gp_rtt_set == 0) {
6523 * With no RTT we have to accept
6524 * even one we are not confident of.
6526 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt;
6527 rack->rc_gp_rtt_set = 1;
6528 } else if (rack->r_ctl.rack_rs.confidence) {
6529 /* update the running gp srtt */
6530 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8);
6531 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8;
6533 if (rack->r_ctl.rack_rs.confidence) {
6535 * record the low and high for highly buffered path computation,
6536 * we only do this if we are confident (not a retransmission).
6538 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) {
6539 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
6542 if (rack->rc_highly_buffered == 0) {
 * Currently once we declare a path as
 * highly buffered there is no going
 * back, which may be a problem...
6548 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) {
6549 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt,
6550 rack->r_ctl.rc_highest_us_rtt,
6551 rack->r_ctl.rc_lowest_us_rtt,
6553 rack->rc_highly_buffered = 1;
6557 if ((rack->r_ctl.rack_rs.confidence) ||
6558 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) {
6560 * If we are highly confident of it <or> it was
6561 * never retransmitted we accept it as the last us_rtt.
6563 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
/* The lowest rtt can be set if it was not retransmitted */
6565 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) {
6566 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
6567 if (rack->r_ctl.rc_lowest_us_rtt == 0)
6568 rack->r_ctl.rc_lowest_us_rtt = 1;
6571 rack_log_rtt_sample(rack, rtt);
6572 o_srtt = tp->t_srtt;
6573 o_var = tp->t_rttvar;
6574 rack = (struct tcp_rack *)tp->t_fb_ptr;
6575 if (tp->t_srtt != 0) {
 * srtt is stored as fixed point with 5 bits after the
 * binary point (i.e., scaled by 32). The following magic is
 * equivalent to the smoothing algorithm in rfc793 with an
 * alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed point).
 * Adjust rtt to origin 0.
6583 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
6584 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
6586 tp->t_srtt += delta;
6587 if (tp->t_srtt <= 0)
6591 * We accumulate a smoothed rtt variance (actually, a
6592 * smoothed mean difference), then set the retransmit timer
6593 * to smoothed rtt + 4 times the smoothed variance. rttvar
6594 * is stored as fixed point with 4 bits after the binary
6595 * point (scaled by 16). The following is equivalent to
6596 * rfc793 smoothing with an alpha of .75 (rttvar =
 * rttvar*3/4 + |delta| / 4). This replaces rfc793's
 * wired-in beta.
6602 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
6603 tp->t_rttvar += delta;
6604 if (tp->t_rttvar <= 0)
6606 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
6607 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
6610 * No rtt measurement yet - use the unsmoothed rtt. Set the
 * variance to half the rtt (so our first retransmit happens
 * at 3*rtt).
6614 tp->t_srtt = rtt << TCP_RTT_SHIFT;
6615 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
6616 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
6618 KMOD_TCPSTAT_INC(tcps_rttupdated);
6621 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt));
6626 * the retransmit should happen at rtt + 4 * rttvar. Because of the
6627 * way we do the smoothing, srtt and rttvar will each average +1/2
6628 * tick of bias. When we compute the retransmit timer, we want 1/2
6629 * tick of rounding and 1 extra tick because of +-1/2 tick
6630 * uncertainty in the firing of the timer. The bias will give us
6631 * exactly the 1.5 tick we need. But, because the bias is
6632 * statistical, we have to test that we don't drop below the minimum
6633 * feasible timer (which is 2 ticks).
6635 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
6636 max(MSEC_2_TICKS(rack_rto_min), rtt + 2), MSEC_2_TICKS(rack_rto_max));
6637 tp->t_softerror = 0;
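/*
 * Illustrative sketch (not compiled) of the fixed-point smoothing
 * above, written with explicit fractions instead of the scaled-delta
 * form the kernel uses (the two are algebraically equivalent):
 *   srtt   <- 7/8 * srtt   + 1/8 * rtt          (alpha = .875)
 *   rttvar <- 3/4 * rttvar + 1/4 * |rtt - srtt| (beta  = .75)
 * The RTO is then srtt + 4 * rttvar, clamped to [min, max] as done
 * above with TCPT_RANGESET(). ex_xmit_timer is a hypothetical name.
 */
#if 0
static void
ex_xmit_timer(uint32_t rtt, uint32_t *srtt, uint32_t *rttvar)
{
	uint32_t err;

	if (*srtt == 0) {
		/* First sample: seed so the first RTO is ~3 * rtt. */
		*srtt = rtt;
		*rttvar = rtt / 2;
		return;
	}
	err = (rtt > *srtt) ? rtt - *srtt : *srtt - rtt;
	*srtt -= *srtt / 8;	/* keep 7/8 of the old estimate */
	*srtt += rtt / 8;	/* blend in 1/8 of the new sample */
	*rttvar -= *rttvar / 4;	/* keep 3/4 of the old deviation */
	*rttvar += err / 4;	/* blend in 1/4 of |rtt - srtt| */
}
#endif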
6641 rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm,
6642 uint32_t t, uint32_t cts)
6645 * For this RSM, we acknowledged the data from a previous
 * transmission, not the last one we made. This means we did a false
 * retransmit.
6649 struct tcp_rack *rack;
6651 if (rsm->r_flags & RACK_HAS_FIN) {
 * The FIN is often sent multiple times when we
 * have everything outstanding ack'd. We ignore this case
 * since it's over now.
6659 if (rsm->r_flags & RACK_TLP) {
6661 * We expect TLP's to have this occur.
6665 rack = (struct tcp_rack *)tp->t_fb_ptr;
6666 /* should we undo cc changes and exit recovery? */
6667 if (IN_RECOVERY(tp->t_flags)) {
6668 if (rack->r_ctl.rc_rsm_start == rsm->r_start) {
 * Undo what we ratcheted down and exit recovery if
 * possible.
6673 EXIT_RECOVERY(tp->t_flags);
6674 tp->snd_recover = tp->snd_una;
6675 if (rack->r_ctl.rc_cwnd_at > tp->snd_cwnd)
6676 tp->snd_cwnd = rack->r_ctl.rc_cwnd_at;
6677 if (rack->r_ctl.rc_ssthresh_at > tp->snd_ssthresh)
6678 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at;
6681 if (rsm->r_flags & RACK_WAS_SACKPASS) {
 * We retransmitted based on a sack and the earlier
 * retransmission ack'd it - re-ordering is occurring.
6686 counter_u64_add(rack_reorder_seen, 1);
6687 rack->r_ctl.rc_reorder_ts = cts;
6689 counter_u64_add(rack_badfr, 1);
6690 counter_u64_add(rack_badfr_bytes, (rsm->r_end - rsm->r_start));
6694 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts)
 * Apply the inbound us-rtt at us_cts to the filter.
6701 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
6702 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt,
6704 if (rack->r_ctl.last_pacing_time &&
6705 rack->rc_gp_dyn_mul &&
6706 (rack->r_ctl.last_pacing_time > us_rtt))
6707 rack->pacing_longer_than_rtt = 1;
6709 rack->pacing_longer_than_rtt = 0;
6710 if (old_rtt > us_rtt) {
6711 /* We just hit a new lower rtt time */
6712 rack_log_rtt_shrinks(rack, us_cts, old_rtt,
6713 __LINE__, RACK_RTTS_NEWRTT);
 * Only count it if it's lower than what we saw within our
6718 if ((old_rtt - us_rtt) > rack_min_rtt_movement) {
6719 if (rack_probertt_lower_within &&
6720 rack->rc_gp_dyn_mul &&
6721 (rack->use_fixed_rate == 0) &&
6722 (rack->rc_always_pace)) {
 * We are seeing a new lower rtt very close
 * to the time that we would have entered probe-rtt.
 * This is probably due to the fact that a peer flow
 * has entered probe-rtt. Let's go in now too.
6731 val = rack_probertt_lower_within * rack_time_between_probertt;
6733 if ((rack->in_probe_rtt == 0) &&
6734 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) {
6735 rack_enter_probertt(rack, us_cts);
6738 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
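/*
 * Illustrative sketch (not compiled) of the early probe-rtt trigger
 * above: when a meaningfully lower RTT shows up within a configured
 * fraction of the inter-probe interval, enter probe-rtt now instead
 * of waiting. The parameter names are hypothetical stand-ins for the
 * rack_probertt_* sysctls, with within_pct taken as a percentage.
 */
#if 0
static int
ex_enter_probertt_early(uint32_t old_rtt, uint32_t us_rtt,
    uint32_t since_last_lower, uint32_t min_move, uint32_t within_pct,
    uint32_t between_probes)
{
	uint32_t val;

	if (old_rtt <= us_rtt || (old_rtt - us_rtt) <= min_move)
		return (0);	/* not a meaningful new low */
	val = within_pct * between_probes / 100;
	return (since_last_lower >= (between_probes - val));
}
#endif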
6744 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
6745 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack)
6748 uint32_t t, len_acked;
6750 if ((rsm->r_flags & RACK_ACKED) ||
6751 (rsm->r_flags & RACK_WAS_ACKED))
6755 if (ack_type == CUM_ACKED) {
6756 if (SEQ_GT(th_ack, rsm->r_end))
6757 len_acked = rsm->r_end - rsm->r_start;
6759 len_acked = th_ack - rsm->r_start;
6761 len_acked = rsm->r_end - rsm->r_start;
6762 if (rsm->r_rtr_cnt == 1) {
6765 t = cts - rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
6768 if (!tp->t_rttlow || tp->t_rttlow > t)
6770 if (!rack->r_ctl.rc_rack_min_rtt ||
6771 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
6772 rack->r_ctl.rc_rack_min_rtt = t;
6773 if (rack->r_ctl.rc_rack_min_rtt == 0) {
6774 rack->r_ctl.rc_rack_min_rtt = 1;
6777 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - rsm->usec_orig_send;
6780 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
6781 if (ack_type == SACKED)
6782 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt);
6785 * For cum-ack we are only confident if what
6786 * is being acked is included in a measurement.
6787 * Otherwise it could be an idle period that
6788 * includes Delayed-ack time.
6790 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt,
6791 (rack->app_limited_needs_set ? 0 : 1), rsm, rsm->r_rtr_cnt);
6793 if ((rsm->r_flags & RACK_TLP) &&
6794 (!IN_RECOVERY(tp->t_flags))) {
6795 /* Segment was a TLP and our retrans matched */
6796 if (rack->r_ctl.rc_tlp_cwnd_reduce) {
6797 rack->r_ctl.rc_rsm_start = tp->snd_max;
6798 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
6799 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
6800 rack_cong_signal(tp, NULL, CC_NDUPACK);
6802 * When we enter recovery we need to assure
6803 * we send one packet.
6805 if (rack->rack_no_prr == 0) {
6806 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
6807 rack_log_to_prr(rack, 7, 0);
6811 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
6812 /* New more recent rack_tmit_time */
6813 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
6814 rack->rc_rack_rtt = t;
6819 * We clear the soft/rxtshift since we got an ack.
6820 * There is no assurance we will call the commit() function
6821 * so we need to clear these to avoid incorrect handling.
6824 tp->t_softerror = 0;
6825 if ((to->to_flags & TOF_TS) &&
6826 (ack_type == CUM_ACKED) &&
6828 ((rsm->r_flags & RACK_OVERMAX) == 0)) {
6830 * Now which timestamp does it match? In this block the ACK
6831 * must be coming from a previous transmission.
6833 for (i = 0; i < rsm->r_rtr_cnt; i++) {
6834 if (rsm->r_tim_lastsent[i] == to->to_tsecr) {
6835 t = cts - rsm->r_tim_lastsent[i];
6838 if ((i + 1) < rsm->r_rtr_cnt) {
6840 rack_earlier_retran(tp, rsm, t, cts);
6842 if (!tp->t_rttlow || tp->t_rttlow > t)
6844 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
6845 rack->r_ctl.rc_rack_min_rtt = t;
6846 if (rack->r_ctl.rc_rack_min_rtt == 0) {
6847 rack->r_ctl.rc_rack_min_rtt = 1;
6850 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
6851 rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
6852 /* New more recent rack_tmit_time */
6853 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
6854 rack->rc_rack_rtt = t;
6856 tcp_rack_xmit_timer(rack, t + 1, len_acked, (t * HPTS_USEC_IN_MSEC), 0, rsm,
 * Ok, it's a SACK block that we retransmitted, or a Windows
 * machine without timestamps. We can tell nothing from the
 * time-stamp since it's not there, or it is the time the peer last
 * received a segment that moved forward its cum-ack point.
6870 i = rsm->r_rtr_cnt - 1;
6871 t = cts - rsm->r_tim_lastsent[i];
6874 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
 * We retransmitted and the ack came back in less
 * than the smallest rtt we have observed. We most
 * likely did an improper retransmit as outlined in
 * 4.2 Step 3 point 2 in the rack-draft.
6881 i = rsm->r_rtr_cnt - 2;
6882 t = cts - rsm->r_tim_lastsent[i];
6883 rack_earlier_retran(tp, rsm, t, cts);
6884 } else if (rack->r_ctl.rc_rack_min_rtt) {
6886 * We retransmitted it and the retransmit did the
6889 if (!rack->r_ctl.rc_rack_min_rtt ||
6890 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
6891 rack->r_ctl.rc_rack_min_rtt = t;
6892 if (rack->r_ctl.rc_rack_min_rtt == 0) {
6893 rack->r_ctl.rc_rack_min_rtt = 1;
6896 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[i])) {
6897 /* New more recent rack_tmit_time */
6898 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[i];
6899 rack->rc_rack_rtt = t;
 * Mark the SACK_PASSED flag on all entries prior to rsm, send-wise.
6911 rack_log_sack_passed(struct tcpcb *tp,
6912 struct tcp_rack *rack, struct rack_sendmap *rsm)
6914 struct rack_sendmap *nrsm;
6917 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap,
6918 rack_head, r_tnext) {
/* Skip the original segment, it is acked */
6923 if (nrsm->r_flags & RACK_ACKED) {
6925 * Skip ack'd segments, though we
6926 * should not see these, since tmap
6927 * should not have ack'd segments.
6931 if (nrsm->r_flags & RACK_SACK_PASSED) {
6933 * We found one that is already marked
6934 * passed, we have been here before and
6935 * so all others below this are marked.
6939 nrsm->r_flags |= RACK_SACK_PASSED;
6940 nrsm->r_flags &= ~RACK_WAS_SACKPASS;
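/*
 * Illustrative sketch (not compiled) of the backwards walk above, on a
 * plain array standing in for the transmit-order list: everything sent
 * before the newly sacked entry gets the passed mark, and the walk can
 * stop at the first entry already marked, since all older ones were
 * marked on a previous pass. ex_mark_sack_passed is a hypothetical name.
 */
#if 0
static void
ex_mark_sack_passed(int *passed, int sacked_idx)
{
	int i;

	for (i = sacked_idx - 1; i >= 0; i--) {
		if (passed[i])
			break;		/* older entries already done */
		passed[i] = 1;
	}
}
#endif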
6945 rack_need_set_test(struct tcpcb *tp,
6946 struct tcp_rack *rack,
6947 struct rack_sendmap *rsm,
6953 if ((tp->t_flags & TF_GPUTINPROG) &&
6954 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
6956 * We were app limited, and this ack
6957 * butts up or goes beyond the point where we want
6958 * to start our next measurement. We need
6959 * to record the new gput_ts as here and
6960 * possibly update the start sequence.
6964 if (rsm->r_rtr_cnt > 1) {
 * This is a retransmit, can we
 * really make any assessment at this
 * point? We are not really sure of
 * the timestamp, is it this or the
 * previous transmission?
 *
 * Let's wait for something better that
 * is not retransmitted.
6979 rack->app_limited_needs_set = 0;
6980 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
6981 /* Do we start at a new end? */
6982 if ((use_which == RACK_USE_BEG) &&
6983 SEQ_GEQ(rsm->r_start, tp->gput_seq)) {
 * When we get an ACK that just eats
 * up some of the rsm, we set RACK_USE_BEG
 * since what's at r_start (i.e. th_ack)
 * is left unacked and that's where the
 * measurement now starts.
6991 tp->gput_seq = rsm->r_start;
6992 rack->r_ctl.rc_gp_output_ts = rsm->usec_orig_send;
6994 if ((use_which == RACK_USE_END) &&
6995 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
6997 * We use the end when the cumack
6998 * is moving forward and completely
6999 * deleting the rsm passed so basically
7000 * r_end holds th_ack.
7002 * For SACK's we also want to use the end
7003 * since this piece just got sacked and
7004 * we want to target anything after that
7005 * in our measurement.
7007 tp->gput_seq = rsm->r_end;
7008 rack->r_ctl.rc_gp_output_ts = rsm->usec_orig_send;
7010 if (use_which == RACK_USE_END_OR_THACK) {
 * special case for ack moving forward,
 * not a sack, we need to move all the
 * way up to where this ack cum-ack moves
 * to.
7017 if (SEQ_GT(th_ack, rsm->r_end))
7018 tp->gput_seq = th_ack;
7020 tp->gput_seq = rsm->r_end;
7021 rack->r_ctl.rc_gp_output_ts = rsm->usec_orig_send;
7023 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) {
7025 * We moved beyond this guy's range, re-calculate
7026 * the new end point.
7028 if (rack->rc_gp_filled == 0) {
7029 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
7031 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
7035 * We are moving the goal post, we may be able to clear the
7036 * measure_saw_probe_rtt flag.
7038 if ((rack->in_probe_rtt == 0) &&
7039 (rack->measure_saw_probe_rtt) &&
7040 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
7041 rack->measure_saw_probe_rtt = 0;
7042 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts,
7043 seq, tp->gput_seq, 0, 5, line, NULL);
7044 if (rack->rc_gp_filled &&
7045 ((tp->gput_ack - tp->gput_seq) <
7046 max(rc_init_window(rack), (MIN_GP_WIN *
7047 ctf_fixed_maxseg(tp))))) {
 * There is no sense in continuing this measurement
 * because it's too small to gain us anything we
 * trust. Skip it and that way we can start a new
 * measurement quicker.
7054 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq,
7055 0, 0, 0, 6, __LINE__, NULL);
7056 tp->t_flags &= ~TF_GPUTINPROG;
7062 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack,
7063 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two)
7065 uint32_t start, end, changed = 0;
7066 struct rack_sendmap stack_map;
7067 struct rack_sendmap *rsm, *nrsm, fe, *insret, *prev, *next;
7068 int32_t used_ref = 1;
7071 start = sack->start;
7074 memset(&fe, 0, sizeof(fe));
7076 if ((rsm == NULL) ||
7077 (SEQ_LT(end, rsm->r_start)) ||
7078 (SEQ_GEQ(start, rsm->r_end)) ||
7079 (SEQ_LT(start, rsm->r_start))) {
7081 * We are not in the right spot,
7082 * find the correct spot in the tree.
7086 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
7093 /* Ok we have an ACK for some piece of this rsm */
7094 if (rsm->r_start != start) {
7095 if ((rsm->r_flags & RACK_ACKED) == 0) {
7097 * Need to split this in two pieces the before and after,
7098 * the before remains in the map, the after must be
7099 * added. In other words we have:
7100 * rsm |--------------|
7104 * and nrsm will be the sacked piece
 * But before we start down that path let's
 * see if the sack spans over on top of
 * the next guy and it is already sacked.
7111 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7112 if (next && (next->r_flags & RACK_ACKED) &&
7113 SEQ_GEQ(end, next->r_start)) {
7115 * So the next one is already acked, and
7116 * we can thus by hookery use our stack_map
7117 * to reflect the piece being sacked and
7118 * then adjust the two tree entries moving
7119 * the start and ends around. So we start like:
7120 * rsm |------------| (not-acked)
7121 * next |-----------| (acked)
7122 * sackblk |-------->
7123 * We want to end like so:
7124 * rsm |------| (not-acked)
7125 * next |-----------------| (acked)
7127 * Where nrsm is a temporary stack piece we
7128 * use to update all the gizmos.
7130 /* Copy up our fudge block */
7132 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
7133 /* Now adjust our tree blocks */
7135 next->r_start = start;
7136 /* Clear out the dup ack count of the remainder */
7138 rsm->r_just_ret = 0;
7139 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
/* Now let's make sure our fudge block is right */
nrsm->r_start = start;
/* Now let's update all the stats and such */
7143 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
7144 if (rack->app_limited_needs_set)
7145 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
7146 changed += (nrsm->r_end - nrsm->r_start);
7147 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
7148 if (nrsm->r_flags & RACK_SACK_PASSED) {
7149 counter_u64_add(rack_reorder_seen, 1);
7150 rack->r_ctl.rc_reorder_ts = cts;
7153 * Now we want to go up from rsm (the
7154 * one left un-acked) to the next one
7155 * in the tmap. We do this so when
7156 * we walk backwards we include marking
7157 * sack-passed on rsm (The one passed in
7158 * is skipped since it is generally called
 * on something sacked before removing it
 * from the tmap).
7162 if (rsm->r_in_tmap) {
7163 nrsm = TAILQ_NEXT(rsm, r_tnext);
7165 * Now that we have the next
7166 * one walk backwards from there.
7168 if (nrsm && nrsm->r_in_tmap)
7169 rack_log_sack_passed(tp, rack, nrsm);
7171 /* Now are we done? */
7172 if (SEQ_LT(end, next->r_end) ||
7173 (end == next->r_end)) {
7174 /* Done with block */
7177 counter_u64_add(rack_sack_used_next_merge, 1);
/* Position for the next block */
7179 start = next->r_end;
7180 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next);
7185 * We can't use any hookery here, so we
7186 * need to split the map. We enter like
7190 * We will add the new block nrsm and
7191 * that will be the new portion, and then
 * fall through after resetting rsm. So we
7193 * split and look like this:
 * We then fall through resetting
 * rsm to nrsm, so the next block
 * picks it up.
7201 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
 * failed XXXrrs what can we do but lose the sack
 * information?
7209 counter_u64_add(rack_sack_splits, 1);
7210 rack_clone_rsm(rack, nrsm, rsm, start);
7211 rsm->r_just_ret = 0;
7212 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7214 if (insret != NULL) {
7215 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7216 nrsm, insret, rack, rsm);
7219 if (rsm->r_in_tmap) {
7220 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
7221 nrsm->r_in_tmap = 1;
7223 rsm->r_flags &= (~RACK_HAS_FIN);
7224 /* Position us to point to the new nrsm that starts the sack blk */
7228 /* Already sacked this piece */
7229 counter_u64_add(rack_sack_skipped_acked, 1);
7231 if (end == rsm->r_end) {
7232 /* Done with block */
7233 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7235 } else if (SEQ_LT(end, rsm->r_end)) {
/* A partial sack to an already sacked block */
7238 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
 * The end goes beyond this guy,
 * reposition the start to the
 * next block.
7247 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7253 if (SEQ_GEQ(end, rsm->r_end)) {
7255 * The end of this block is either beyond this guy or right
7256 * at this guy. I.e.:
7262 if ((rsm->r_flags & RACK_ACKED) == 0) {
7263 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
7264 changed += (rsm->r_end - rsm->r_start);
7265 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
7266 if (rsm->r_in_tmap) /* should be true */
7267 rack_log_sack_passed(tp, rack, rsm);
/* Is Reordering occurring? */
7269 if (rsm->r_flags & RACK_SACK_PASSED) {
7270 rsm->r_flags &= ~RACK_SACK_PASSED;
7271 counter_u64_add(rack_reorder_seen, 1);
7272 rack->r_ctl.rc_reorder_ts = cts;
7274 if (rack->app_limited_needs_set)
7275 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
7276 rsm->r_ack_arrival = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
7277 rsm->r_flags |= RACK_ACKED;
7278 rsm->r_flags &= ~RACK_TLP;
7279 if (rsm->r_in_tmap) {
7280 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7284 counter_u64_add(rack_sack_skipped_acked, 1);
7287 if (end == rsm->r_end) {
7288 /* This block only - done, setup for next */
 * There is more not covered by this rsm, move on
 * to the next block in the RB tree.
7295 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
 * The end of this sack block is smaller than
 * our rsm, i.e.:
7308 if ((rsm->r_flags & RACK_ACKED) == 0) {
7309 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7310 if (prev && (prev->r_flags & RACK_ACKED)) {
7312 * Goal, we want the right remainder of rsm to shrink
7313 * in place and span from (rsm->r_start = end) to rsm->r_end.
7314 * We want to expand prev to go all the way
7315 * to prev->r_end <- end.
7316 * so in the tree we have before:
7317 * prev |--------| (acked)
7318 * rsm |-------| (non-acked)
7320 * We churn it so we end up with
7321 * prev |----------| (acked)
7322 * rsm |-----| (non-acked)
7323 * nrsm |-| (temporary)
7326 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
7329 /* Now adjust nrsm (stack copy) to be
7330 * the one that is the small
7331 * piece that was "sacked".
7335 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
7337 * Now nrsm is our new little piece
7338 * that is acked (which was merged
7339 * to prev). Update the rtt and changed
7340 * based on that. Also check for reordering.
7342 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
7343 if (rack->app_limited_needs_set)
7344 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
7345 changed += (nrsm->r_end - nrsm->r_start);
7346 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
7347 if (nrsm->r_flags & RACK_SACK_PASSED) {
7348 counter_u64_add(rack_reorder_seen, 1);
7349 rack->r_ctl.rc_reorder_ts = cts;
7352 counter_u64_add(rack_sack_used_prev_merge, 1);
7355 * This is the case where our previous
7356 * block is not acked either, so we must
7357 * split the block in two.
7359 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
7361 /* allocation failed; what can we do but lose the sack info? */
7365 * In this case nrsm becomes
7366 * nrsm->r_start = end;
7367 * nrsm->r_end = rsm->r_end;
7368 * which is un-acked.
7370 * rsm->r_end = nrsm->r_start;
7371 * i.e. the remaining un-acked
7372 * piece (nrsm) ends up on the right.
7375 * So we start like this
7376 * rsm |----------| (not acked)
7378 * and build it so we have rsm |---| (acked)
7380 * followed by nrsm |------| (not acked)
7382 counter_u64_add(rack_sack_splits, 1);
7383 rack_clone_rsm(rack, nrsm, rsm, end);
7384 rsm->r_flags &= (~RACK_HAS_FIN);
7385 rsm->r_just_ret = 0;
7386 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7388 if (insret != NULL) {
7389 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7390 nrsm, insret, rack, rsm);
7393 if (rsm->r_in_tmap) {
7394 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
7395 nrsm->r_in_tmap = 1;
7398 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
7399 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
7400 changed += (rsm->r_end - rsm->r_start);
7401 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
7402 if (rsm->r_in_tmap) /* should be true */
7403 rack_log_sack_passed(tp, rack, rsm);
7404 /* Is reordering occurring? */
7405 if (rsm->r_flags & RACK_SACK_PASSED) {
7406 rsm->r_flags &= ~RACK_SACK_PASSED;
7407 counter_u64_add(rack_reorder_seen, 1);
7408 rack->r_ctl.rc_reorder_ts = cts;
7410 if (rack->app_limited_needs_set)
7411 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
7412 rsm->r_ack_arrival = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
7413 rsm->r_flags |= RACK_ACKED;
7414 rsm->r_flags &= ~RACK_TLP;
7415 if (rsm->r_in_tmap) {
7416 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7420 } else if (start != end) {
7422 * The block was already acked.
7424 counter_u64_add(rack_sack_skipped_acked, 1);
7428 if (rsm && (rsm->r_flags & RACK_ACKED)) {
7430 * Now can we merge the block we worked on
7431 * with either the previous or next block?
7434 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7436 if (next->r_flags & RACK_ACKED) {
7437 /* yep this and next can be merged */
7438 rsm = rack_merge_rsm(rack, rsm, next);
7439 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7443 /* Now what about the previous? */
7444 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7446 if (prev->r_flags & RACK_ACKED) {
7447 /* yep the previous and this can be merged */
7448 rsm = rack_merge_rsm(rack, prev, rsm);
7449 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
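/*
 * Merge rule sketch (plain fields for illustration, not the rsm types
 * above): consecutive map entries a and b, which tile the sequence
 * space so that a->r_end == b->r_start, may collapse into one entry
 * only when both carry RACK_ACKED:
 *
 *	if ((a->r_flags & RACK_ACKED) && (b->r_flags & RACK_ACKED))
 *		a absorbs b (a->r_end = b->r_end) and b is freed;
 */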
7454 if (used_ref == 0) {
7455 counter_u64_add(rack_sack_proc_all, 1);
7457 counter_u64_add(rack_sack_proc_short, 1);
7459 /* Save off the next one for quick reference. */
7461 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7464 *prsm = rack->r_ctl.rc_sacklast = nrsm;
7465 /* Pass back the moved. */
7471 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack)
7473 struct rack_sendmap *tmap;
7476 while (rsm && (rsm->r_flags & RACK_ACKED)) {
7477 /* It's no longer sacked, mark it so */
7478 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
7480 if (rsm->r_in_tmap) {
7481 panic("rack:%p rsm:%p flags:0x%x in tmap?",
7482 rack, rsm, rsm->r_flags);
7485 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS);
7486 /* Rebuild it into our tmap */
7488 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7491 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext);
7494 tmap->r_in_tmap = 1;
7495 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7498 * Now let's possibly clear the sack filter so we start
7499 * recognizing sacks that cover this area.
7501 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack);
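/*
 * Illustrative sketch of the renege test (not part of the build; the
 * struct and helper below are hypothetical stand-ins for the rsm and
 * RB-tree machinery): reneging reduces to "the new cumulative ACK
 * stops exactly where a block the peer previously SACKed begins",
 * which can only happen if the peer discarded that SACKed data.
 */
#if 0
struct blk { uint32_t start, end; int sacked; };

static int
peer_reneged(const struct blk *lowest, uint32_t th_ack)
{
	/* Real code would use a wraparound-safe SEQ compare. */
	return (lowest->sacked && th_ack == lowest->start);
}
#endif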
7506 rack_do_decay(struct tcp_rack *rack)
7510 #define timersub(tvp, uvp, vvp) \
7512 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
7513 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
7514 if ((vvp)->tv_usec < 0) { \
7516 (vvp)->tv_usec += 1000000; \
7520 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res);
7523 rack->r_ctl.input_pkt++;
7524 if ((rack->rc_in_persist) ||
7525 (res.tv_sec >= 1) ||
7526 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) {
7528 * Check whether to decay the SAD
7529 * detection metrics; we want them to
7530 * decay by 1/4 for each second (or more) that has passed.
7534 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt;
7535 /* Update our saved tracking values */
7536 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt;
7537 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
7538 /* Now do we escape without decay? */
7539 #ifdef NETFLIX_EXP_DETECTION
7540 if (rack->rc_in_persist ||
7541 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) ||
7542 (pkt_delta < tcp_sad_low_pps)){
7544 * We don't decay idle connections
7545 * or ones that have a low input pps.
7549 /* Decay the counters */
7550 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count,
7552 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count,
7554 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra,
7556 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move,
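/*
 * Illustrative sketch of the decay step (a hypothetical helper, not
 * the actual ctf_decay_count() used above): scale a counter down to a
 * given percentage, e.g. pct = 75 strips a quarter per decay pass.
 */
#if 0
static uint32_t
decay_count(uint32_t count, uint32_t pct)
{
	/* Widen to 64 bits so the multiply cannot overflow. */
	return ((uint32_t)(((uint64_t)count * pct) / 100));
}
#endif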
7563 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th)
7565 uint32_t changed, entered_recovery = 0;
7566 struct tcp_rack *rack;
7567 struct rack_sendmap *rsm, *rm;
7568 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1];
7569 register uint32_t th_ack;
7570 int32_t i, j, k, num_sack_blks = 0;
7571 uint32_t cts, acked, ack_point, sack_changed = 0;
7572 int loop_start = 0, moved_two = 0;
7576 INP_WLOCK_ASSERT(tp->t_inpcb);
7577 if (th->th_flags & TH_RST) {
7578 /* We don't log resets */
7581 rack = (struct tcp_rack *)tp->t_fb_ptr;
7582 cts = tcp_ts_getticks();
7583 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
7585 th_ack = th->th_ack;
7586 if (rack->sack_attack_disable == 0)
7587 rack_do_decay(rack);
7588 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) {
7590 * You only get credit for
7591 * MSS and greater (and you get extra
7592 * credit for larger cum-ack moves).
7596 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp);
7597 rack->r_ctl.ack_count += ac;
7598 counter_u64_add(rack_ack_total, ac);
7600 if (rack->r_ctl.ack_count > 0xfff00000) {
7602 * reduce the number to keep us under
7605 rack->r_ctl.ack_count /= 2;
7606 rack->r_ctl.sack_count /= 2;
7608 if (SEQ_GT(th_ack, tp->snd_una)) {
7609 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__);
7610 tp->t_acktime = ticks;
7612 if (rsm && SEQ_GT(th_ack, rsm->r_start))
7613 changed = th_ack - rsm->r_start;
7616 * The ACK point is advancing to th_ack, we must drop off
7617 * the packets in the rack log and calculate any eligible
7620 rack->r_wanted_output = 1;
7622 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
7624 if ((th_ack - 1) == tp->iss) {
7626 * For the SYN incoming case we will not
7627 * have called tcp_output for the sending of
7628 * the SYN, so there will be no map. All
7629 * other cases should probably be a panic.
7633 if (tp->t_flags & TF_SENTFIN) {
7634 /* if we send a FIN we will not have a map */
7638 panic("No rack map tp:%p for th:%p state:%d rack:%p snd_una:%u snd_max:%u snd_nxt:%u chg:%d\n",
7640 th, tp->t_state, rack,
7641 tp->snd_una, tp->snd_max, tp->snd_nxt, changed);
7645 if (SEQ_LT(th_ack, rsm->r_start)) {
7646 /* Huh map is missing this */
7648 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n",
7650 th_ack, tp->t_state, rack->r_state);
7654 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack);
7655 /* Now do we consume the whole thing? */
7656 if (SEQ_GEQ(th_ack, rsm->r_end)) {
7657 /* Its all consumed. */
7659 uint8_t newly_acked;
7661 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
7662 rsm->r_rtr_bytes = 0;
7663 /* Record the time of highest cumack sent */
7664 rack->r_ctl.rc_gp_cumack_ts = rsm->usec_orig_send;
7665 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7668 panic("removing head in rack:%p rsm:%p rm:%p",
7672 if (rsm->r_in_tmap) {
7673 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7677 if (rsm->r_flags & RACK_ACKED) {
7679 * It was acked on the scoreboard -- remove
7682 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
7684 } else if (rsm->r_flags & RACK_SACK_PASSED) {
7686 * There are segments ACKED on the
7687 * scoreboard further up. We are seeing
7690 rsm->r_flags &= ~RACK_SACK_PASSED;
7691 counter_u64_add(rack_reorder_seen, 1);
7692 rsm->r_ack_arrival = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
7693 rsm->r_flags |= RACK_ACKED;
7694 rack->r_ctl.rc_reorder_ts = cts;
7696 left = th_ack - rsm->r_end;
7697 if (rack->app_limited_needs_set && newly_acked)
7698 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK);
7699 /* Free back to zone */
7700 rack_free(rack, rsm);
7706 if (rsm->r_flags & RACK_ACKED) {
7708 * It was acked on the scoreboard -- remove it from
7709 * total for the part being cum-acked.
7711 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
7714 * Clear the dup ack count for
7715 * the piece that remains.
7718 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
7719 if (rsm->r_rtr_bytes) {
7721 * It was retransmitted; adjust the
7722 * sack holes for what was acked.
7726 ack_am = (th_ack - rsm->r_start);
7727 if (ack_am >= rsm->r_rtr_bytes) {
7728 rack->r_ctl.rc_holes_rxt -= ack_am;
7729 rsm->r_rtr_bytes -= ack_am;
7733 * Update where the piece starts and record
7734 * the time of send of highest cumack sent.
7736 rack->r_ctl.rc_gp_cumack_ts = rsm->usec_orig_send;
7737 rsm->r_start = th_ack;
7738 if (rack->app_limited_needs_set)
7739 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG);
7743 /* Check for reneging */
7744 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
7745 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) {
7747 * The peer has moved snd_una up to
7748 * the edge of this send, i.e. one
7749 * that it had previously acked. The only
7750 * way that can be true is if the peer threw
7751 * away data (space issues) that it had
7752 * previously sacked (else it would have
7753 * given us snd_una up to rsm->r_end).
7754 * We need to undo the acked markings here.
7756 * Note we have to look to make sure th_ack is
7757 * our rsm->r_start in case we get an old ack
7758 * where th_ack is behind snd_una.
7760 rack_peer_reneges(rack, rsm, th->th_ack);
7762 if ((to->to_flags & TOF_SACK) == 0) {
7763 /* We are done nothing left */
7766 /* Sack block processing */
7767 if (SEQ_GT(th_ack, tp->snd_una))
7770 ack_point = tp->snd_una;
7771 for (i = 0; i < to->to_nsacks; i++) {
7772 bcopy((to->to_sacks + i * TCPOLEN_SACK),
7773 &sack, sizeof(sack));
7774 sack.start = ntohl(sack.start);
7775 sack.end = ntohl(sack.end);
7776 if (SEQ_GT(sack.end, sack.start) &&
7777 SEQ_GT(sack.start, ack_point) &&
7778 SEQ_LT(sack.start, tp->snd_max) &&
7779 SEQ_GT(sack.end, ack_point) &&
7780 SEQ_LEQ(sack.end, tp->snd_max)) {
7781 sack_blocks[num_sack_blks] = sack;
7783 #ifdef NETFLIX_STATS
7784 } else if (SEQ_LEQ(sack.start, th_ack) &&
7785 SEQ_LEQ(sack.end, th_ack)) {
7787 * It's a D-SACK block.
7789 tcp_record_dsack(sack.start, sack.end);
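/*
 * The guard above reduces to this predicate (plain compares shown for
 * illustration; real code must keep the wraparound-safe SEQ_* macros):
 *
 *	usable = end > start && start > ack_point &&
 *	    start < snd_max && end <= snd_max;
 *
 * i.e. a usable SACK block lies strictly above the cumulative ACK
 * point and entirely within what we have actually sent.
 */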
7795 * Sort the SACK blocks so we can update the rack scoreboard with
7798 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks,
7799 num_sack_blks, th->th_ack);
7800 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks);
7801 if (num_sack_blks == 0) {
7802 /* Nothing to sack (DSACKs?) */
7803 goto out_with_totals;
7805 if (num_sack_blks < 2) {
7806 /* Only one, we don't need to sort */
7809 /* Sort the sacks */
7810 for (i = 0; i < num_sack_blks; i++) {
7811 for (j = i + 1; j < num_sack_blks; j++) {
7812 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
7813 sack = sack_blocks[i];
7814 sack_blocks[i] = sack_blocks[j];
7815 sack_blocks[j] = sack;
7820 * Now are any of the sack block ends the same (yes some
7821 * implementations send these)?
7824 if (num_sack_blks == 0)
7825 goto out_with_totals;
7826 if (num_sack_blks > 1) {
7827 for (i = 0; i < num_sack_blks; i++) {
7828 for (j = i + 1; j < num_sack_blks; j++) {
7829 if (sack_blocks[i].end == sack_blocks[j].end) {
7831 * Ok, these two have the same end; we
7832 * keep the one with the smaller start
7833 * (it covers more) and drop the other.
7836 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) {
7838 * The second block covers
7839 * more area, use that
7841 sack_blocks[i].start = sack_blocks[j].start;
7844 * Now collapse out the dup-sack and
7847 for (k = (j + 1); k < num_sack_blks; k++) {
7848 sack_blocks[j].start = sack_blocks[k].start;
7849 sack_blocks[j].end = sack_blocks[k].end;
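/*
 * Example of the collapse above: blocks 10-20 and 5-20 share end 20;
 * we keep 5-20 (it covers more), shift the remaining blocks down one
 * slot, and start the scan over.
 */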
7860 * First let's look to see if
7861 * we have retransmitted and
7862 * can use the transmit next?
7864 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
7866 SEQ_GT(sack_blocks[0].end, rsm->r_start) &&
7867 SEQ_LT(sack_blocks[0].start, rsm->r_end)) {
7869 * We probably did the FR and the next
7870 * SACK in sequence continues as we would expect.
7872 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two);
7874 rack->r_wanted_output = 1;
7876 sack_changed += acked;
7878 if (num_sack_blks == 1) {
7880 * This is what we would expect from
7881 * a normal implementation to happen
7882 * after we have retransmitted the FR,
7883 * i.e the sack-filter pushes down
7884 * to 1 block and the next to be retransmitted
7885 * is the sequence in the sack block (as more
7886 * data is acked). Count this as ACK'd data to boost
7887 * the chances of recovering from any false positives.
7889 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp));
7890 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp)));
7891 counter_u64_add(rack_express_sack, 1);
7892 if (rack->r_ctl.ack_count > 0xfff00000) {
7894 * reduce the number to keep us under
7897 rack->r_ctl.ack_count /= 2;
7898 rack->r_ctl.sack_count /= 2;
7900 goto out_with_totals;
7903 * Start the loop through the
7904 * rest of blocks, past the first block.
7910 /* It's a sack of some sort */
7911 rack->r_ctl.sack_count++;
7912 if (rack->r_ctl.sack_count > 0xfff00000) {
7914 * reduce the number to keep us under
7917 rack->r_ctl.ack_count /= 2;
7918 rack->r_ctl.sack_count /= 2;
7920 counter_u64_add(rack_sack_total, 1);
7921 if (rack->sack_attack_disable) {
7922 /* An attacker disablement is in place */
7923 if (num_sack_blks > 1) {
7924 rack->r_ctl.sack_count += (num_sack_blks - 1);
7925 rack->r_ctl.sack_moved_extra++;
7926 counter_u64_add(rack_move_some, 1);
7927 if (rack->r_ctl.sack_moved_extra > 0xfff00000) {
7928 rack->r_ctl.sack_moved_extra /= 2;
7929 rack->r_ctl.sack_noextra_move /= 2;
7934 rsm = rack->r_ctl.rc_sacklast;
7935 for (i = loop_start; i < num_sack_blks; i++) {
7936 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two);
7938 rack->r_wanted_output = 1;
7940 sack_changed += acked;
7944 * If we did not get a SACK for at least an MSS and
7945 * had to move at all, or if we moved more than our
7946 * threshold, it counts against the "extra" move.
7948 rack->r_ctl.sack_moved_extra += moved_two;
7949 counter_u64_add(rack_move_some, 1);
7952 * else we did not have to move
7953 * any more than we would expect.
7955 rack->r_ctl.sack_noextra_move++;
7956 counter_u64_add(rack_move_none, 1);
7958 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) {
7960 * If the SACK was not a full MSS then
7961 * we add to sack_count the number of
7962 * MSS's (or possibly more than
7963 * an MSS if it's a TSO send) we had to skip by.
7965 rack->r_ctl.sack_count += moved_two;
7966 counter_u64_add(rack_sack_total, moved_two);
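/*
 * Example of the accounting above: a SACK block that newly covered
 * less than one MSS but forced us to walk past (moved_two =) 3 map
 * entries can charge 3 extra entries to both sack_count and
 * sack_moved_extra, tilting the ratios the SAD check below looks at.
 */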
7969 * Now we need to setup for the next
7970 * round. First we make sure we won't
7971 * exceed the size of our uint32_t on
7972 * the various counts, and then clear out
7975 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) ||
7976 (rack->r_ctl.sack_noextra_move > 0xfff00000)) {
7977 rack->r_ctl.sack_moved_extra /= 2;
7978 rack->r_ctl.sack_noextra_move /= 2;
7980 if (rack->r_ctl.sack_count > 0xfff00000) {
7981 rack->r_ctl.ack_count /= 2;
7982 rack->r_ctl.sack_count /= 2;
7987 if (num_sack_blks > 1) {
7989 * You get an extra stroke if
7990 * you have more than one sack-blk, this
7991 * could be where we are skipping forward
7992 * and the sack-filter is still working, or
7993 * it could be an attacker constantly
7996 rack->r_ctl.sack_moved_extra++;
7997 counter_u64_add(rack_move_some, 1);
8000 #ifdef NETFLIX_EXP_DETECTION
8001 if ((rack->do_detection || tcp_force_detection) &&
8002 tcp_sack_to_ack_thresh &&
8003 tcp_sack_to_move_thresh &&
8004 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) {
8006 * We have thresholds set to find
8007 * possible attackers and disable sack.
8010 uint64_t ackratio, moveratio, movetotal;
8013 rack_log_sad(rack, 1);
8014 ackratio = (uint64_t)(rack->r_ctl.sack_count);
8015 ackratio *= (uint64_t)(1000);
8016 if (rack->r_ctl.ack_count)
8017 ackratio /= (uint64_t)(rack->r_ctl.ack_count);
8019 /* We really should not hit here */
8022 if ((rack->sack_attack_disable == 0) &&
8023 (ackratio > rack_highest_sack_thresh_seen))
8024 rack_highest_sack_thresh_seen = (uint32_t)ackratio;
8025 movetotal = rack->r_ctl.sack_moved_extra;
8026 movetotal += rack->r_ctl.sack_noextra_move;
8027 moveratio = rack->r_ctl.sack_moved_extra;
8028 moveratio *= (uint64_t)1000;
8030 moveratio /= movetotal;
8032 /* No moves, that's pretty good */
8035 if ((rack->sack_attack_disable == 0) &&
8036 (moveratio > rack_highest_move_thresh_seen))
8037 rack_highest_move_thresh_seen = (uint32_t)moveratio;
8038 if (rack->sack_attack_disable == 0) {
8039 if ((ackratio > tcp_sack_to_ack_thresh) &&
8040 (moveratio > tcp_sack_to_move_thresh)) {
8041 /* Disable sack processing */
8042 rack->sack_attack_disable = 1;
8043 if (rack->r_rep_attack == 0) {
8044 rack->r_rep_attack = 1;
8045 counter_u64_add(rack_sack_attacks_detected, 1);
8047 if (tcp_attack_on_turns_on_logging) {
8049 * Turn on logging, used for debugging
8052 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging;
8054 /* Clamp the cwnd at flight size */
8055 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd;
8056 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
8057 rack_log_sad(rack, 2);
8060 /* We are sack-disabled; check for false positives */
8061 if ((ackratio <= tcp_restoral_thresh) ||
8062 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) {
8063 rack->sack_attack_disable = 0;
8064 rack_log_sad(rack, 3);
8065 /* Restart counting */
8066 rack->r_ctl.sack_count = 0;
8067 rack->r_ctl.sack_moved_extra = 0;
8068 rack->r_ctl.sack_noextra_move = 1;
8069 rack->r_ctl.ack_count = max(1,
8070 (BYTES_THIS_ACK(tp, th)/ctf_fixed_maxseg(rack->rc_tp)));
8072 if (rack->r_rep_reverse == 0) {
8073 rack->r_rep_reverse = 1;
8074 counter_u64_add(rack_sack_attacks_reversed, 1);
8076 /* Restore the cwnd */
8077 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd)
8078 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd;
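/*
 * Worked example of the ratios above: sack_count = 900 against
 * ack_count = 150 gives ackratio = 900 * 1000 / 150 = 6000 (six SACKs
 * per ACK, in thousandths); sack_moved_extra = 800 against a move
 * total of 1000 gives moveratio = 800 * 1000 / 1000 = 800. Detection
 * trips only when both exceed their tcp_sack_to_*_thresh sysctls.
 */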
8084 /* Something changed cancel the rack timer */
8085 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
8087 tsused = tcp_ts_getticks();
8088 rsm = tcp_rack_output(tp, rack, tsused);
8089 if ((!IN_RECOVERY(tp->t_flags)) &&
8091 /* Enter recovery */
8092 rack->r_ctl.rc_rsm_start = rsm->r_start;
8093 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
8094 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
8095 entered_recovery = 1;
8096 rack_cong_signal(tp, NULL, CC_NDUPACK);
8098 * When we enter recovery we need to assure we send
8101 if (rack->rack_no_prr == 0) {
8102 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
8103 rack_log_to_prr(rack, 8, 0);
8105 rack->r_timer_override = 1;
8107 rack->r_ctl.rc_agg_early = 0;
8108 } else if (IN_RECOVERY(tp->t_flags) &&
8110 (rack->r_rr_config == 3)) {
8112 * Assure we can output and we get no
8113 * remembered pace time except the retransmit.
8115 rack->r_timer_override = 1;
8116 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
8117 rack->r_ctl.rc_resend = rsm;
8119 if (IN_RECOVERY(tp->t_flags) &&
8120 (rack->rack_no_prr == 0) &&
8121 (entered_recovery == 0)) {
8122 /* Deal with PRR here (in recovery only) */
8123 uint32_t pipe, snd_una;
8125 rack->r_ctl.rc_prr_delivered += changed;
8126 /* Compute prr_sndcnt */
8127 if (SEQ_GT(tp->snd_una, th_ack)) {
8128 snd_una = tp->snd_una;
8132 pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt;
8133 if (pipe > tp->snd_ssthresh) {
8136 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh;
8137 if (rack->r_ctl.rc_prr_recovery_fs > 0)
8138 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs;
8140 rack->r_ctl.rc_prr_sndcnt = 0;
8141 rack_log_to_prr(rack, 9, 0);
8145 if (sndcnt > (long)rack->r_ctl.rc_prr_out)
8146 sndcnt -= rack->r_ctl.rc_prr_out;
8149 rack->r_ctl.rc_prr_sndcnt = sndcnt;
8150 rack_log_to_prr(rack, 10, 0);
8154 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out)
8155 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out);
8158 if (changed > limit)
8160 limit += ctf_fixed_maxseg(tp);
8161 if (tp->snd_ssthresh > pipe) {
8162 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit);
8163 rack_log_to_prr(rack, 11, 0);
8165 rack->r_ctl.rc_prr_sndcnt = min(0, limit);
8166 rack_log_to_prr(rack, 12, 0);
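/*
 * The above is Proportional Rate Reduction (RFC 6937). Worked example,
 * assuming ssthresh covers 10 segments and rc_prr_recovery_fs covered
 * 20 segments of flight at recovery entry: once prr_delivered reaches
 * the equivalent of 6 segments, sndcnt = 6 * 10 / 20 = 3 segments,
 * minus whatever rc_prr_out we have already sent; roughly one segment
 * sent per two delivered, converging on ssthresh by recovery's end.
 */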
8169 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) &&
8170 ((rack->rc_inp->inp_in_hpts == 0) &&
8171 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) {
8173 * If you are pacing output you don't want
8177 rack->r_ctl.rc_agg_early = 0;
8178 rack->r_timer_override = 1;
8184 rack_strike_dupack(struct tcp_rack *rack)
8186 struct rack_sendmap *rsm;
8188 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
8189 if (rsm && (rsm->r_dupack < 0xff)) {
8191 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) {
8192 rack->r_wanted_output = 1;
8193 rack->r_timer_override = 1;
8194 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3);
8196 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3);
8202 rack_check_bottom_drag(struct tcpcb *tp,
8203 struct tcp_rack *rack,
8204 struct socket *so, int32_t acked)
8206 uint32_t segsiz, minseg;
8208 segsiz = ctf_fixed_maxseg(tp);
8209 if (so->so_snd.sb_flags & SB_TLS_IFNET) {
8210 minseg = rack->r_ctl.rc_pace_min_segs;
8214 if (tp->snd_max == tp->snd_una) {
8216 * We are doing dynamic pacing and we are way
8217 * under. Basically everything got acked while
8218 * we were still waiting on the pacer to expire.
8220 * This means we need to boost the b/w in
8221 * addition to any earlier boosting of
8224 rack->rc_dragged_bottom = 1;
8225 rack_validate_multipliers_at_or_above100(rack);
8227 * Let's use the segment bytes acked plus
8228 * the lowest RTT seen as the basis to
8229 * form a b/w estimate. This will be off,
8230 * since the true estimate should use
8231 * about 1/2 the RTT,
8232 * but we can settle for that.
8234 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) &&
8236 uint64_t bw, calc_bw, rtt;
8238 rtt = rack->r_ctl.rack_rs.rs_us_rtt;
8240 calc_bw = bw * 1000000;
8242 if (rack->r_ctl.last_max_bw &&
8243 (rack->r_ctl.last_max_bw < calc_bw)) {
8245 * If we have a last calculated max bw
8248 calc_bw = rack->r_ctl.last_max_bw;
8250 /* now plop it in */
8251 if (rack->rc_gp_filled == 0) {
8252 if (calc_bw > ONE_POINT_TWO_MEG) {
8254 * If we have no measurement
8255 * don't let us set in more than
8256 * 1.2Mbps. If we are still too
8257 * low after pacing with this we
8258 * will hopefully have a max b/w
8259 * available to sanity check things.
8261 calc_bw = ONE_POINT_TWO_MEG;
8263 rack->r_ctl.rc_rtt_diff = 0;
8264 rack->r_ctl.gp_bw = calc_bw;
8265 rack->rc_gp_filled = 1;
8266 rack->r_ctl.num_avg = RACK_REQ_AVG;
8267 rack_set_pace_segments(rack->rc_tp, rack, __LINE__);
8268 } else if (calc_bw > rack->r_ctl.gp_bw) {
8269 rack->r_ctl.rc_rtt_diff = 0;
8270 rack->r_ctl.num_avg = RACK_REQ_AVG;
8271 rack->r_ctl.gp_bw = calc_bw;
8272 rack_set_pace_segments(rack->rc_tp, rack, __LINE__);
8274 rack_increase_bw_mul(rack, -1, 0, 0, 1);
8276 * For acks over 1mss we do an extra boost to simulate
8277 * where we would get 2 acks (we want 110 for the mul).
8280 rack_increase_bw_mul(rack, -1, 0, 0, 1);
8283 * Huh, this should not be, settle
8284 * for just an old increase.
8286 rack_increase_bw_mul(rack, -1, 0, 0, 1);
8288 } else if ((IN_RECOVERY(tp->t_flags) == 0) &&
8289 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)),
8291 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) &&
8292 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) &&
8293 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <=
8294 (segsiz * rack_req_segs))) {
8296 * We are doing dynamic GP pacing and
8297 * everything except 1MSS or less
8298 * is acked. We are still pacing away,
8299 * and there is data that could be sent. This
8300 * means we are inserting delayed ack time into
8301 * our measurements because we are pacing too slowly.
8303 rack_validate_multipliers_at_or_above100(rack);
8304 rack->rc_dragged_bottom = 1;
8305 rack_increase_bw_mul(rack, -1, 0, 0, 1);
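/*
 * Worked example of the b/w seeding above (assuming, per the comment,
 * bytes acked scaled over the lowest filtered RTT): acked = 14600
 * bytes with rs_us_rtt = 10000 usec gives roughly calc_bw =
 * 14600 * 1000000 / 10000 = 1,460,000 bytes/sec, which is then bounded
 * by last_max_bw when one exists and clamped to ONE_POINT_TWO_MEG when
 * we have no measurement yet.
 */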
8310 * Return value of 1, we do not need to call rack_process_data().
8311 * Return value of 0, rack_process_data can be called.
8312 * For ret_val, if it's 0 the TCP is locked; if it's non-zero
8313 * it's unlocked and probably unsafe to touch the TCB.
8316 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
8317 struct tcpcb *tp, struct tcpopt *to,
8318 uint32_t tiwin, int32_t tlen,
8319 int32_t * ofia, int32_t thflags, int32_t * ret_val)
8321 int32_t ourfinisacked = 0;
8322 int32_t nsegs, acked_amount;
8325 struct tcp_rack *rack;
8326 int32_t under_pacing = 0;
8327 int32_t recovery = 0;
8329 rack = (struct tcp_rack *)tp->t_fb_ptr;
8330 if (SEQ_GT(th->th_ack, tp->snd_max)) {
8331 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val);
8332 rack->r_wanted_output = 1;
8335 if (rack->rc_gp_filled &&
8336 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
8339 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
8340 if (rack->rc_in_persist)
8342 if ((th->th_ack == tp->snd_una) && (tiwin == tp->snd_wnd))
8343 rack_strike_dupack(rack);
8344 rack_log_ack(tp, to, th);
8346 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
8348 * Old ack, behind (or duplicate to) the last one rcv'd.
8349 * Note: Should mark that reordering is occurring! We should also
8350 * look for sack blocks arriving, e.g. ack 1, 4-4 then ack 1,
8351 * 3-3, 4-4 would be reordering. As well as ack 1, 3-3 <no
8357 * If we reach this point, ACK is not a duplicate, i.e., it ACKs
8358 * something we sent.
8360 if (tp->t_flags & TF_NEEDSYN) {
8362 * T/TCP: Connection was half-synchronized, and our SYN has
8363 * been ACK'd (so connection is now fully synchronized). Go
8364 * to non-starred state, increment snd_una for ACK of SYN,
8365 * and check if we can do window scaling.
8367 tp->t_flags &= ~TF_NEEDSYN;
8369 /* Do window scaling? */
8370 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
8371 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
8372 tp->rcv_scale = tp->request_r_scale;
8373 /* Send window already scaled. */
8376 nsegs = max(1, m->m_pkthdr.lro_nsegs);
8377 INP_WLOCK_ASSERT(tp->t_inpcb);
8379 acked = BYTES_THIS_ACK(tp, th);
8380 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
8381 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
8383 * If we just performed our first retransmit, and the ACK arrives
8384 * within our recovery window, then it was a mistake to do the
8385 * retransmit in the first place. Recover our original cwnd and
8386 * ssthresh, and proceed to transmit where we left off.
8388 if (tp->t_flags & TF_PREVVALID) {
8389 tp->t_flags &= ~TF_PREVVALID;
8390 if (tp->t_rxtshift == 1 &&
8391 (int)(ticks - tp->t_badrxtwin) < 0)
8392 rack_cong_signal(tp, th, CC_RTO_ERR);
8395 /* assure we are not backed off */
8397 rack->rc_tlp_in_progress = 0;
8398 rack->r_ctl.rc_tlp_cnt_out = 0;
8400 * If it is the RXT timer we want to
8401 * stop it, so we can restart a TLP.
8403 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
8404 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
8405 #ifdef NETFLIX_HTTP_LOGGING
8406 tcp_http_check_for_comp(rack->rc_tp, th->th_ack);
8410 * If we have a timestamp reply, update smoothed round trip time. If
8411 * no timestamp is present but transmit timer is running and timed
8412 * sequence number was acked, update smoothed round trip time. Since
8413 * we now have an rtt measurement, cancel the timer backoff (cf.,
8414 * Phil Karn's retransmit alg.). Recompute the initial retransmit
8417 * Some boxes send broken timestamp replies during the SYN+ACK
8418 * phase, ignore timestamps of 0 or we could calculate a huge RTT
8419 * and blow up the retransmit timer.
8422 * If all outstanding data is acked, stop retransmit timer and
8423 * remember to restart (more output or persist). If there is more
8424 * data to be acked, restart retransmit timer, using current
8425 * (possibly backed-off) value.
8429 *ofia = ourfinisacked;
8432 if (rack->r_ctl.rc_early_recovery) {
8433 if (IN_RECOVERY(tp->t_flags)) {
8434 if (SEQ_LT(th->th_ack, tp->snd_recover) &&
8435 (SEQ_LT(th->th_ack, tp->snd_max))) {
8436 tcp_rack_partialack(tp, th);
8438 rack_post_recovery(tp, th);
8444 * Let the congestion control algorithm update congestion control
8445 * related information. This typically means increasing the
8446 * congestion window.
8448 rack_ack_received(tp, rack, th, nsegs, CC_ACK, recovery);
8449 SOCKBUF_LOCK(&so->so_snd);
8450 acked_amount = min(acked, (int)sbavail(&so->so_snd));
8451 tp->snd_wnd -= acked_amount;
8452 mfree = sbcut_locked(&so->so_snd, acked_amount);
8453 if ((sbused(&so->so_snd) == 0) &&
8454 (acked > acked_amount) &&
8455 (tp->t_state >= TCPS_FIN_WAIT_1) &&
8456 (tp->t_flags & TF_SENTFIN)) {
8458 * We must be sure our fin
8459 * was sent and acked (we can be
8460 * in FIN_WAIT_1 without having
8465 /* NB: sowwakeup_locked() does an implicit unlock. */
8466 sowwakeup_locked(so);
8468 if (rack->r_ctl.rc_early_recovery == 0) {
8469 if (IN_RECOVERY(tp->t_flags)) {
8470 if (SEQ_LT(th->th_ack, tp->snd_recover) &&
8471 (SEQ_LT(th->th_ack, tp->snd_max))) {
8472 tcp_rack_partialack(tp, th);
8474 rack_post_recovery(tp, th);
8478 tp->snd_una = th->th_ack;
8479 if (SEQ_GT(tp->snd_una, tp->snd_recover))
8480 tp->snd_recover = tp->snd_una;
8482 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
8483 tp->snd_nxt = tp->snd_una;
8486 (rack->use_fixed_rate == 0) &&
8487 (rack->in_probe_rtt == 0) &&
8488 rack->rc_gp_dyn_mul &&
8489 rack->rc_always_pace) {
8490 /* Check if we are dragging bottom */
8491 rack_check_bottom_drag(tp, rack, so, acked);
8493 if (tp->snd_una == tp->snd_max) {
8494 /* Nothing left outstanding */
8495 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
8496 if (rack->r_ctl.rc_went_idle_time == 0)
8497 rack->r_ctl.rc_went_idle_time = 1;
8498 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
8499 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
8501 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
8502 /* Set need output so persist might get set */
8503 rack->r_wanted_output = 1;
8504 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
8505 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
8506 (sbavail(&so->so_snd) == 0) &&
8507 (tp->t_flags2 & TF2_DROP_AF_DATA)) {
8509 * The socket was gone and the
8510 * peer sent data, time to
8514 /* tcp_close will kill the inp pre-log the Reset */
8515 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
8517 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
8523 *ofia = ourfinisacked;
8528 rack_collapsed_window(struct tcp_rack *rack)
8531 * Now we must walk the
8532 * send map and divide the
8533 * ones left stranded. These
8534 * guys can't cause us to abort
8535 * the connection and are really
8536 * "unsent". However if a buggy
8537 * client actually did keep some
8538 * of the data i.e. collapsed the win
8539 * and refused to ack and then opened
8540 * the win and acked that data. We would
8541 * get into an ack war, the simplier
8542 * method then of just pretending we
8543 * did not send those segments something
8546 struct rack_sendmap *rsm, *nrsm, fe, *insret;
8549 max_seq = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd;
8550 memset(&fe, 0, sizeof(fe));
8551 fe.r_start = max_seq;
8552 /* Find the first seq past or at maxseq */
8553 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
8555 /* Nothing to do strange */
8556 rack->rc_has_collapsed = 0;
8560 * Now do we need to split at
8561 * the collapse point?
8563 if (SEQ_GT(max_seq, rsm->r_start)) {
8564 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
8566 /* We can't get an rsm, mark all? */
8571 rack_clone_rsm(rack, nrsm, rsm, max_seq);
8572 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
8574 if (insret != NULL) {
8575 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
8576 nrsm, insret, rack, rsm);
8579 if (rsm->r_in_tmap) {
8580 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8581 nrsm->r_in_tmap = 1;
8584 * Set the new RSM in as the
8585 * collapsed starting point
8590 counter_u64_add(rack_collapsed_win, 1);
8591 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) {
8592 nrsm->r_flags |= RACK_RWND_COLLAPSED;
8593 rack->rc_has_collapsed = 1;
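/*
 * Example of the collapse split above: snd_una = 1000 and
 * snd_wnd = 500 give max_seq = 1500. An rsm spanning 1200-1800 is
 * cloned at 1500; 1200-1500 remains normal send history while
 * 1500-1800, and every rsm after it, is flagged RACK_RWND_COLLAPSED
 * and treated as if it had never been sent.
 */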
8598 rack_un_collapse_window(struct tcp_rack *rack)
8600 struct rack_sendmap *rsm;
8602 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
8603 if (rsm->r_flags & RACK_RWND_COLLAPSED)
8604 rsm->r_flags &= ~RACK_RWND_COLLAPSED;
8608 rack->rc_has_collapsed = 0;
8612 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack,
8613 int32_t tlen, int32_t tfo_syn)
8615 if (DELAY_ACK(tp, tlen) || tfo_syn) {
8616 if (rack->rc_dack_mode &&
8618 (rack->rc_dack_toggle == 1)) {
8619 goto no_delayed_ack;
8621 rack_timer_cancel(tp, rack,
8622 rack->r_ctl.rc_rcvtime, __LINE__);
8623 tp->t_flags |= TF_DELACK;
8626 rack->r_wanted_output = 1;
8627 tp->t_flags |= TF_ACKNOW;
8628 if (rack->rc_dack_mode) {
8629 if (tp->t_flags & TF_DELACK)
8630 rack->rc_dack_toggle = 1;
8632 rack->rc_dack_toggle = 0;
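/*
 * The toggle above appears intended to alternate: with rc_dack_mode
 * set, an ACK that would be delayed is instead forced out immediately
 * when rc_dack_toggle == 1 (the previous one was delayed), and the
 * toggle is then flipped for the next round.
 */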
8637 * Return value of 1, the TCB is unlocked and most
8638 * likely gone, return value of 0, the TCP is still
8642 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
8643 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
8644 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
8647 * Update window information. Don't look at window if no ACK: TACs
8648 * send garbage on first SYN.
8652 struct tcp_rack *rack;
8654 rack = (struct tcp_rack *)tp->t_fb_ptr;
8655 INP_WLOCK_ASSERT(tp->t_inpcb);
8656 nsegs = max(1, m->m_pkthdr.lro_nsegs);
8657 if ((thflags & TH_ACK) &&
8658 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
8659 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
8660 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
8661 /* keep track of pure window updates */
8663 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
8664 KMOD_TCPSTAT_INC(tcps_rcvwinupd);
8665 tp->snd_wnd = tiwin;
8666 tp->snd_wl1 = th->th_seq;
8667 tp->snd_wl2 = th->th_ack;
8668 if (tp->snd_wnd > tp->max_sndwnd)
8669 tp->max_sndwnd = tp->snd_wnd;
8670 rack->r_wanted_output = 1;
8671 } else if (thflags & TH_ACK) {
8672 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) {
8673 tp->snd_wnd = tiwin;
8674 tp->snd_wl1 = th->th_seq;
8675 tp->snd_wl2 = th->th_ack;
8678 if (tp->snd_wnd < ctf_outstanding(tp))
8679 /* The peer collapsed the window */
8680 rack_collapsed_window(rack);
8681 else if (rack->rc_has_collapsed)
8682 rack_un_collapse_window(rack);
8683 /* Was persist timer active and now we have window space? */
8684 if ((rack->rc_in_persist != 0) &&
8685 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
8686 rack->r_ctl.rc_pace_min_segs))) {
8687 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime);
8688 tp->snd_nxt = tp->snd_max;
8689 /* Make sure we output to start the timer */
8690 rack->r_wanted_output = 1;
8692 /* Do we enter persists? */
8693 if ((rack->rc_in_persist == 0) &&
8694 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
8695 TCPS_HAVEESTABLISHED(tp->t_state) &&
8696 (tp->snd_max == tp->snd_una) &&
8697 sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
8698 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
8700 * Here the rwnd is less than
8701 * the pacing size, we are established,
8702 * nothing is outstanding, and there is
8703 * data to send. Enter persists.
8705 tp->snd_nxt = tp->snd_una;
8706 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
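/*
 * Example of the persist test above: with rc_high_rwnd/2 = 32768 and
 * rc_pace_min_segs = 4380, the floor is min(32768, 4380) = 4380 bytes;
 * if everything is acked, 8 KB sits in the socket buffer, and the peer
 * window has shrunk to 2 KB, we enter persists rather than dribble out
 * sub-pacing-size segments.
 */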
8708 if (tp->t_flags2 & TF2_DROP_AF_DATA) {
8713 * don't process the URG bit; just drag rcv_up along
8716 tp->rcv_up = tp->rcv_nxt;
8717 INP_WLOCK_ASSERT(tp->t_inpcb);
8720 * Process the segment text, merging it into the TCP sequencing
8721 * queue, and arranging for acknowledgment of receipt if necessary.
8722 * This process logically involves adjusting tp->rcv_wnd as data is
8723 * presented to the user (this happens in tcp_usrreq.c, case
8724 * PRU_RCVD). If a FIN has already been received on this connection
8725 * then we just ignore the text.
8727 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
8728 IS_FASTOPEN(tp->t_flags));
8729 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) &&
8730 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
8731 tcp_seq save_start = th->th_seq;
8732 tcp_seq save_rnxt = tp->rcv_nxt;
8733 int save_tlen = tlen;
8735 m_adj(m, drop_hdrlen); /* delayed header drop */
8737 * Insert segment which includes th into TCP reassembly
8738 * queue with control block tp. Set thflags to whether
8739 * reassembly now includes a segment with FIN. This handles
8740 * the common case inline (segment is the next to be
8741 * received on an established connection, and the queue is
8742 * empty), avoiding linkage into and removal from the queue
8743 * and repetition of various conversions. Set DELACK for
8744 * segments received in order, but ack immediately when
8745 * segments are out of order (so fast retransmit can work).
8747 if (th->th_seq == tp->rcv_nxt &&
8749 (TCPS_HAVEESTABLISHED(tp->t_state) ||
8751 #ifdef NETFLIX_SB_LIMITS
8752 u_int mcnt, appended;
8754 if (so->so_rcv.sb_shlim) {
8757 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
8758 CFO_NOSLEEP, NULL) == false) {
8759 counter_u64_add(tcp_sb_shlim_fails, 1);
8765 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn);
8766 tp->rcv_nxt += tlen;
8768 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
8769 (tp->t_fbyte_in == 0)) {
8770 tp->t_fbyte_in = ticks;
8771 if (tp->t_fbyte_in == 0)
8773 if (tp->t_fbyte_out && tp->t_fbyte_in)
8774 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
8776 thflags = th->th_flags & TH_FIN;
8777 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
8778 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
8779 SOCKBUF_LOCK(&so->so_rcv);
8780 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
8783 #ifdef NETFLIX_SB_LIMITS
8786 sbappendstream_locked(&so->so_rcv, m, 0);
8787 /* NB: sorwakeup_locked() does an implicit unlock. */
8788 sorwakeup_locked(so);
8789 #ifdef NETFLIX_SB_LIMITS
8790 if (so->so_rcv.sb_shlim && appended != mcnt)
8791 counter_fo_release(so->so_rcv.sb_shlim,
8796 * XXX: Due to the header drop above "th" is
8797 * theoretically invalid by now. Fortunately
8798 * m_adj() doesn't actually free any mbufs when
8799 * trimming from the head.
8801 tcp_seq temp = save_start;
8802 thflags = tcp_reass(tp, th, &temp, &tlen, m);
8803 tp->t_flags |= TF_ACKNOW;
8805 if ((tp->t_flags & TF_SACK_PERMIT) && (save_tlen > 0)) {
8806 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
8808 * DSACK actually handled in the fastpath
8811 RACK_OPTS_INC(tcp_sack_path_1);
8812 tcp_update_sack_list(tp, save_start,
8813 save_start + save_tlen);
8814 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
8815 if ((tp->rcv_numsacks >= 1) &&
8816 (tp->sackblks[0].end == save_start)) {
8818 * Partial overlap, recorded at todrop
8821 RACK_OPTS_INC(tcp_sack_path_2a);
8822 tcp_update_sack_list(tp,
8823 tp->sackblks[0].start,
8824 tp->sackblks[0].end);
8826 RACK_OPTS_INC(tcp_sack_path_2b);
8827 tcp_update_dsack_list(tp, save_start,
8828 save_start + save_tlen);
8830 } else if (tlen >= save_tlen) {
8831 /* Update of sackblks. */
8832 RACK_OPTS_INC(tcp_sack_path_3);
8833 tcp_update_dsack_list(tp, save_start,
8834 save_start + save_tlen);
8835 } else if (tlen > 0) {
8836 RACK_OPTS_INC(tcp_sack_path_4);
8837 tcp_update_dsack_list(tp, save_start,
8847 * If FIN is received ACK the FIN and let the user know that the
8848 * connection is closing.
8850 if (thflags & TH_FIN) {
8851 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
8854 * If connection is half-synchronized (ie NEEDSYN
8855 * flag on) then delay ACK, so it may be piggybacked
8856 * when SYN is sent. Otherwise, since we received a
8857 * FIN then no more input can be expected, send ACK
8860 if (tp->t_flags & TF_NEEDSYN) {
8861 rack_timer_cancel(tp, rack,
8862 rack->r_ctl.rc_rcvtime, __LINE__);
8863 tp->t_flags |= TF_DELACK;
8865 tp->t_flags |= TF_ACKNOW;
8869 switch (tp->t_state) {
8872 * In SYN_RECEIVED and ESTABLISHED STATES enter the
8875 case TCPS_SYN_RECEIVED:
8876 tp->t_starttime = ticks;
8878 case TCPS_ESTABLISHED:
8879 rack_timer_cancel(tp, rack,
8880 rack->r_ctl.rc_rcvtime, __LINE__);
8881 tcp_state_change(tp, TCPS_CLOSE_WAIT);
8885 * If still in FIN_WAIT_1 STATE FIN has not been
8886 * acked so enter the CLOSING state.
8888 case TCPS_FIN_WAIT_1:
8889 rack_timer_cancel(tp, rack,
8890 rack->r_ctl.rc_rcvtime, __LINE__);
8891 tcp_state_change(tp, TCPS_CLOSING);
8895 * In FIN_WAIT_2 state enter the TIME_WAIT state,
8896 * starting the time-wait timer, turning off the
8897 * other standard timers.
8899 case TCPS_FIN_WAIT_2:
8900 rack_timer_cancel(tp, rack,
8901 rack->r_ctl.rc_rcvtime, __LINE__);
8907 * Return any desired output.
8909 if ((tp->t_flags & TF_ACKNOW) ||
8910 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) {
8911 rack->r_wanted_output = 1;
8913 INP_WLOCK_ASSERT(tp->t_inpcb);
8918 * Here nothing is really faster; it's just that we
8919 * have broken out the fast-data path also, just like
8923 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so,
8924 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
8925 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos)
8928 int32_t newsize = 0; /* automatic sockbuf scaling */
8929 struct tcp_rack *rack;
8930 #ifdef NETFLIX_SB_LIMITS
8931 u_int mcnt, appended;
8935 * The size of tcp_saveipgen must be the size of the max ip header,
8938 u_char tcp_saveipgen[IP6_HDR_LEN];
8939 struct tcphdr tcp_savetcp;
8944 * If last ACK falls within this segment's sequence numbers, record
8945 * the timestamp. NOTE that the test is modified according to the
8946 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
8948 if (__predict_false(th->th_seq != tp->rcv_nxt)) {
8951 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
8954 if (tiwin && tiwin != tp->snd_wnd) {
8957 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) {
8960 if (__predict_false((to->to_flags & TOF_TS) &&
8961 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) {
8964 if (__predict_false((th->th_ack != tp->snd_una))) {
8967 if (__predict_false(tlen > sbspace(&so->so_rcv))) {
8970 if ((to->to_flags & TOF_TS) != 0 &&
8971 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
8972 tp->ts_recent_age = tcp_ts_getticks();
8973 tp->ts_recent = to->to_tsval;
8975 rack = (struct tcp_rack *)tp->t_fb_ptr;
8977 * This is a pure, in-sequence data packet with nothing on the
8978 * reassembly queue and we have enough buffer space to take it.
8980 nsegs = max(1, m->m_pkthdr.lro_nsegs);
8982 #ifdef NETFLIX_SB_LIMITS
8983 if (so->so_rcv.sb_shlim) {
8986 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
8987 CFO_NOSLEEP, NULL) == false) {
8988 counter_u64_add(tcp_sb_shlim_fails, 1);
8994 /* Clean receiver SACK report if present */
8995 if (tp->rcv_numsacks)
8996 tcp_clean_sackreport(tp);
8997 KMOD_TCPSTAT_INC(tcps_preddat);
8998 tp->rcv_nxt += tlen;
9000 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
9001 (tp->t_fbyte_in == 0)) {
9002 tp->t_fbyte_in = ticks;
9003 if (tp->t_fbyte_in == 0)
9005 if (tp->t_fbyte_out && tp->t_fbyte_in)
9006 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
9009 * Pull snd_wl1 up to prevent seq wrap relative to th_seq.
9011 tp->snd_wl1 = th->th_seq;
9013 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt.
9015 tp->rcv_up = tp->rcv_nxt;
9016 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
9017 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
9019 if (so->so_options & SO_DEBUG)
9020 tcp_trace(TA_INPUT, ostate, tp,
9021 (void *)tcp_saveipgen, &tcp_savetcp, 0);
9023 newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
9025 /* Add data to socket buffer. */
9026 SOCKBUF_LOCK(&so->so_rcv);
9027 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
9031 * Set new socket buffer size. Give up when limit is
9035 if (!sbreserve_locked(&so->so_rcv,
9037 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
9038 m_adj(m, drop_hdrlen); /* delayed header drop */
9039 #ifdef NETFLIX_SB_LIMITS
9042 sbappendstream_locked(&so->so_rcv, m, 0);
9043 ctf_calc_rwin(so, tp);
9045 /* NB: sorwakeup_locked() does an implicit unlock. */
9046 sorwakeup_locked(so);
9047 #ifdef NETFLIX_SB_LIMITS
9048 if (so->so_rcv.sb_shlim && mcnt != appended)
9049 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended);
9051 rack_handle_delayed_ack(tp, rack, tlen, 0);
9052 if (tp->snd_una == tp->snd_max)
9053 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
9058 * This subfunction is used to try to highly optimize the
9059 * fast path. We again allow window updates that are
9060 * in sequence to remain in the fast-path. We also add
9061 * in the __predict's to attempt to help the compiler.
9062 * Note that if we return 0, then we can *not* process
9063 * it and the caller should push the packet into the
9067 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
9068 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
9069 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts)
9075 * The size of tcp_saveipgen must be the size of the max ip header,
9078 u_char tcp_saveipgen[IP6_HDR_LEN];
9079 struct tcphdr tcp_savetcp;
9082 int32_t under_pacing = 0;
9083 struct tcp_rack *rack;
9085 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
9086 /* Old ack, behind (or duplicate to) the last one rcv'd */
9089 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) {
9090 /* Above what we have sent? */
9093 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
9094 /* We are retransmitting */
9097 if (__predict_false(tiwin == 0)) {
9101 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) {
9102 /* We need a SYN or a FIN, unlikely.. */
9105 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) {
9106 /* Timestamp is behind .. old ack with seq wrap? */
9109 if (__predict_false(IN_RECOVERY(tp->t_flags))) {
9110 /* Still recovering */
9113 rack = (struct tcp_rack *)tp->t_fb_ptr;
9114 if (rack->r_ctl.rc_sacked) {
9115 /* We have sack holes on our scoreboard */
9118 /* Ok if we reach here, we can process a fast-ack */
9119 if (rack->rc_gp_filled &&
9120 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
9123 nsegs = max(1, m->m_pkthdr.lro_nsegs);
9124 rack_log_ack(tp, to, th);
9125 /* Did the window get updated? */
9126 if (tiwin != tp->snd_wnd) {
9127 tp->snd_wnd = tiwin;
9128 tp->snd_wl1 = th->th_seq;
9129 if (tp->snd_wnd > tp->max_sndwnd)
9130 tp->max_sndwnd = tp->snd_wnd;
9132 /* Do we exit persists? */
9133 if ((rack->rc_in_persist != 0) &&
9134 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
9135 rack->r_ctl.rc_pace_min_segs))) {
9136 rack_exit_persist(tp, rack, cts);
9138 /* Do we enter persists? */
9139 if ((rack->rc_in_persist == 0) &&
9140 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
9141 TCPS_HAVEESTABLISHED(tp->t_state) &&
9142 (tp->snd_max == tp->snd_una) &&
9143 sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
9144 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
9146 * Here the rwnd is less than
9147 * the pacing size, we are established,
9148 * nothing is outstanding, and there is
9149 * data to send. Enter persists.
9151 tp->snd_nxt = tp->snd_una;
9152 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
9155 * If last ACK falls within this segment's sequence numbers, record
9156 * the timestamp. NOTE that the test is modified according to the
9157 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
9159 if ((to->to_flags & TOF_TS) != 0 &&
9160 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
9161 tp->ts_recent_age = tcp_ts_getticks();
9162 tp->ts_recent = to->to_tsval;
9165 * This is a pure ack for outstanding data.
9167 KMOD_TCPSTAT_INC(tcps_predack);
9170 * "bad retransmit" recovery.
9172 if (tp->t_flags & TF_PREVVALID) {
9173 tp->t_flags &= ~TF_PREVVALID;
9174 if (tp->t_rxtshift == 1 &&
9175 (int)(ticks - tp->t_badrxtwin) < 0)
9176 rack_cong_signal(tp, th, CC_RTO_ERR);
9179 * Recalculate the transmit timer / rtt.
9181 * Some boxes send broken timestamp replies during the SYN+ACK
9182 * phase, ignore timestamps of 0 or we could calculate a huge RTT
9183 * and blow up the retransmit timer.
9185 acked = BYTES_THIS_ACK(tp, th);
9188 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
9189 hhook_run_tcp_est_in(tp, th, to);
9192 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
9193 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
9194 sbdrop(&so->so_snd, acked);
9196 /* assure we are not backed off */
9198 rack->rc_tlp_in_progress = 0;
9199 rack->r_ctl.rc_tlp_cnt_out = 0;
9201 * If it is the RXT timer we want to
9202 * stop it, so we can restart a TLP.
9204 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
9205 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
9206 #ifdef NETFLIX_HTTP_LOGGING
9207 tcp_http_check_for_comp(rack->rc_tp, th->th_ack);
9211 * Let the congestion control algorithm update congestion control
9212 * related information. This typically means increasing the
9213 * congestion window.
9215 rack_ack_received(tp, rack, th, nsegs, CC_ACK, 0);
9217 tp->snd_una = th->th_ack;
9218 if (tp->snd_wnd < ctf_outstanding(tp)) {
9219 /* The peer collapsed the window */
9220 rack_collapsed_window(rack);
9221 } else if (rack->rc_has_collapsed)
9222 rack_un_collapse_window(rack);
9225 * Pull snd_wl2 up to prevent seq wrap relative to th_ack.
9227 tp->snd_wl2 = th->th_ack;
9230 /* ND6_HINT(tp); *//* Some progress has been made. */
9233 * If all outstanding data are acked, stop retransmit timer,
9234 * otherwise restart timer using current (possibly backed-off)
9235 * value. If process is waiting for space, wakeup/selwakeup/signal.
9236 * If data are ready to send, let tcp_output decide between more
9237 * output or persist.
9240 if (so->so_options & SO_DEBUG)
9241 tcp_trace(TA_INPUT, ostate, tp,
9242 (void *)tcp_saveipgen,
9246 (rack->use_fixed_rate == 0) &&
9247 (rack->in_probe_rtt == 0) &&
9248 rack->rc_gp_dyn_mul &&
9249 rack->rc_always_pace) {
9250 /* Check if we are dragging bottom */
9251 rack_check_bottom_drag(tp, rack, so, acked);
9253 if (tp->snd_una == tp->snd_max) {
9254 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
9255 if (rack->r_ctl.rc_went_idle_time == 0)
9256 rack->r_ctl.rc_went_idle_time = 1;
9257 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
9258 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
9260 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
9262 /* Wake up the socket if we have room to write more */
9264 if (sbavail(&so->so_snd)) {
9265 rack->r_wanted_output = 1;
9271 * Return value of 1, the TCB is unlocked and most
9272 * likely gone, return value of 0, the TCP is still
9276 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
9277 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
9278 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
9280 int32_t ret_val = 0;
9282 int32_t ourfinisacked = 0;
9283 struct tcp_rack *rack;
9285 ctf_calc_rwin(so, tp);
9287 * If the state is SYN_SENT: if seg contains an ACK, but not for our
9288 * SYN, drop the input. if seg contains a RST, then drop the
9289 * connection. if seg does not contain SYN, then drop it. Otherwise
9290 * this is an acceptable SYN segment initialize tp->rcv_nxt and
9291 * tp->irs if seg contains ack then advance tp->snd_una if seg
9292 * contains an ECE and ECN support is enabled, the stream is ECN
9293 * capable. if SYN has been acked change to ESTABLISHED else
9294 * SYN_RCVD state arrange for segment to be acked (eventually)
9295 * continue processing rest of data/controls.
9297 if ((thflags & TH_ACK) &&
9298 (SEQ_LEQ(th->th_ack, tp->iss) ||
9299 SEQ_GT(th->th_ack, tp->snd_max))) {
9300 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
9301 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
9304 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
9305 TCP_PROBE5(connect__refused, NULL, tp,
9306 mtod(m, const char *), tp, th);
9307 tp = tcp_drop(tp, ECONNREFUSED);
9311 if (thflags & TH_RST) {
9315 if (!(thflags & TH_SYN)) {
9319 tp->irs = th->th_seq;
9321 rack = (struct tcp_rack *)tp->t_fb_ptr;
9322 if (thflags & TH_ACK) {
9323 int tfo_partial = 0;
9325 KMOD_TCPSTAT_INC(tcps_connects);
9328 mac_socketpeer_set_from_mbuf(m, so);
9330 /* Do window scaling on this connection? */
9331 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
9332 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
9333 tp->rcv_scale = tp->request_r_scale;
9335 tp->rcv_adv += min(tp->rcv_wnd,
9336 TCP_MAXWIN << tp->rcv_scale);
9338 * If not all the data that was sent in the TFO SYN
9339 * has been acked, resend the remainder right away.
9341 if (IS_FASTOPEN(tp->t_flags) &&
9342 (tp->snd_una != tp->snd_max)) {
9343 tp->snd_nxt = th->th_ack;
9347 * If there's data, delay ACK; if there's also a FIN ACKNOW
9348 * will be turned on later.
9350 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) {
9351 rack_timer_cancel(tp, rack,
9352 rack->r_ctl.rc_rcvtime, __LINE__);
9353 tp->t_flags |= TF_DELACK;
9355 rack->r_wanted_output = 1;
9356 tp->t_flags |= TF_ACKNOW;
9357 rack->rc_dack_toggle = 0;
9359 if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) &&
9360 (V_tcp_do_ecn == 1)) {
9361 tp->t_flags2 |= TF2_ECN_PERMIT;
9362 KMOD_TCPSTAT_INC(tcps_ecn_shs);
9364 if (SEQ_GT(th->th_ack, tp->snd_una)) {
9366 * We advance snd_una for the
9367 * fast open case. If th_ack is
9368 * acknowledging data beyond
9369 * snd_una we can't just call
9370 * ack-processing since the
9371 * data stream in our send-map
9372 * will start at snd_una + 1 (one
9373 * beyond the SYN). If it's just
9374 * equal we don't need to do that
9375 * and there is no send_map.
9380 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions:
9381 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1
9383 tp->t_starttime = ticks;
9384 if (tp->t_flags & TF_NEEDFIN) {
9385 tcp_state_change(tp, TCPS_FIN_WAIT_1);
9386 tp->t_flags &= ~TF_NEEDFIN;
9389 tcp_state_change(tp, TCPS_ESTABLISHED);
9390 TCP_PROBE5(connect__established, NULL, tp,
9391 mtod(m, const char *), tp, th);
9392 rack_cc_conn_init(tp);
9396 * Received initial SYN in SYN-SENT[*] state => simultaneous
9397 * open. If segment contains CC option and there is a
9398 * cached CC, apply TAO test. If it succeeds, connection is *
9399 * half-synchronized. Otherwise, do 3-way handshake:
9400 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If
9401 * there was no CC option, clear cached CC value.
9403 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
9404 tcp_state_change(tp, TCPS_SYN_RECEIVED);
9406 INP_WLOCK_ASSERT(tp->t_inpcb);
9408 * Advance th->th_seq to correspond to first data byte. If data,
9409 * trim to stay within window, dropping FIN if necessary.
9412 if (tlen > tp->rcv_wnd) {
9413 todrop = tlen - tp->rcv_wnd;
9417 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin);
9418 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
9420 tp->snd_wl1 = th->th_seq - 1;
9421 tp->rcv_up = th->th_seq;
9423 * Client side of transaction: already sent SYN and data. If the
9424 * remote host used T/TCP to validate the SYN, our data will be
9425 * ACK'd; if so, enter normal data segment processing in the middle
9426 * of step 5, ack processing. Otherwise, goto step 6.
9428 if (thflags & TH_ACK) {
9429 /* For syn-sent we need to possibly update the rtt */
9430 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
9433 t = tcp_ts_getticks() - to->to_tsecr;
9434 if (!tp->t_rttlow || tp->t_rttlow > t)
9436 tcp_rack_xmit_timer(rack, t + 1, 1, (t * HPTS_USEC_IN_MSEC), 0, NULL, 2);
9437 tcp_rack_xmit_timer_commit(rack, tp);
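/*
 * Example: with millisecond timestamp ticks, an echoed tsecr from
 * 25 ticks ago yields t = 25, and the timer is fed both the tick form
 * (t + 1 = 26) and the microsecond form (t * HPTS_USEC_IN_MSEC =
 * 25000 usec).
 */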
9439 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val))
9441 /* We may have changed to FIN_WAIT_1 above */
9442 if (tp->t_state == TCPS_FIN_WAIT_1) {
9444 * In FIN_WAIT_1 STATE in addition to the processing
9445 * for the ESTABLISHED state if our FIN is now
9446 * acknowledged then enter FIN_WAIT_2.
9448 if (ourfinisacked) {
9450 * If we can't receive any more data, then
9451 * closing user can proceed. Starting the
9452 * timer is contrary to the specification,
9453 * but if we don't get a FIN we'll hang forever.
9456 * XXXjl: we should release the tp also, and
9457 * use a compressed state.
9459 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
9460 soisdisconnected(so);
9461 tcp_timer_activate(tp, TT_2MSL,
9462 (tcp_fast_finwait2_recycle ?
9463 tcp_finwait2_timeout :
9466 tcp_state_change(tp, TCPS_FIN_WAIT_2);
9470 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
9471 tiwin, thflags, nxt_pkt));
9475 * Return value of 1, the TCB is unlocked and most
9476 * likely gone; return value of 0, the TCP is still locked.
9480 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
9481 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
9482 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
9484 struct tcp_rack *rack;
9485 int32_t ret_val = 0;
9486 int32_t ourfinisacked = 0;
9488 ctf_calc_rwin(so, tp);
9489 if ((thflags & TH_ACK) &&
9490 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
9491 SEQ_GT(th->th_ack, tp->snd_max))) {
9492 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
9493 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
9496 rack = (struct tcp_rack *)tp->t_fb_ptr;
9497 if (IS_FASTOPEN(tp->t_flags)) {
9499 * When a TFO connection is in SYN_RECEIVED, the
9500 * only valid packets are the initial SYN, a
9501 * retransmit/copy of the initial SYN (possibly with
9502 * a subset of the original data), a valid ACK, a FIN, or a RST.
9505 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
9506 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
9507 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
9509 } else if (thflags & TH_SYN) {
9510 /* non-initial SYN is ignored */
9511 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) ||
9512 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) ||
9513 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) {
9514 ctf_do_drop(m, NULL);
9517 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) {
9518 ctf_do_drop(m, NULL);
9522 if ((thflags & TH_RST) ||
9523 (tp->t_fin_is_rst && (thflags & TH_FIN)))
9524 return (ctf_process_rst(m, th, so, tp));
9526 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
9527 * it's less than ts_recent, drop it.
9529 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
9530 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
9531 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
9535 * In the SYN-RECEIVED state, validate that the packet belongs to
9536 * this connection before trimming the data to fit the receive
9537 * window. Check the sequence number versus IRS since we know the
9538 * sequence numbers haven't wrapped. This is a partial fix for the
9539 * "LAND" DoS attack.
9541 if (SEQ_LT(th->th_seq, tp->irs)) {
9542 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
9543 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
9546 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
9550 * If last ACK falls within this segment's sequence numbers, record
9551 * its timestamp. NOTE: 1) That the test incorporates suggestions
9552 * from the latest proposal of the tcplw@cray.com list (Braden
9553 * 1993/04/26). 2) That updating only on newer timestamps interferes
9554 * with our earlier PAWS tests, so this check should be solely
9555 * predicated on the sequence space of this segment. 3) That we
9556 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
9557 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
9558 * SEG.Len. This modified check allows us to overcome RFC1323's
9559 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
9560 * p.869. In such cases, we can still calculate the RTT correctly
9561 * when RCV.NXT == Last.ACK.Sent.
9563 if ((to->to_flags & TOF_TS) != 0 &&
9564 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
9565 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
9566 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
9567 tp->ts_recent_age = tcp_ts_getticks();
9568 tp->ts_recent = to->to_tsval;
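/*
 * Illustrative sketch (not part of the stack): the modified RFC1323
 * boundary test described above, with plain integers standing in for the
 * kernel's SEQ_LEQ() macros. The extra +1 accounts for the sequence
 * space consumed by a SYN or FIN flag.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool
should_record_tstamp(uint32_t last_ack_sent, uint32_t seg_seq,
    uint32_t seg_len, bool syn_or_fin)
{
	uint32_t seg_end = seg_seq + seg_len + (syn_or_fin ? 1 : 0);

	/* Last.ACK.Sent must lie in [SEG.SEQ, SEG.SEQ + SEG.Len] */
	return ((int32_t)(seg_seq - last_ack_sent) <= 0 &&
	    (int32_t)(last_ack_sent - seg_end) <= 0);
}
#endif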
9570 tp->snd_wnd = tiwin;
9572 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
9573 * is on (half-synchronized state), then queue data for later
9574 * processing; else drop segment and return.
9576 if ((thflags & TH_ACK) == 0) {
9577 if (IS_FASTOPEN(tp->t_flags)) {
9578 rack_cc_conn_init(tp);
9580 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
9581 tiwin, thflags, nxt_pkt));
9583 KMOD_TCPSTAT_INC(tcps_connects);
9585 /* Do window scaling? */
9586 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
9587 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
9588 tp->rcv_scale = tp->request_r_scale;
9591 * Make transitions: SYN-RECEIVED -> ESTABLISHED, SYN-RECEIVED* -> FIN-WAIT-1.
9594 tp->t_starttime = ticks;
9595 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) {
9596 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
9597 tp->t_tfo_pending = NULL;
9599 if (tp->t_flags & TF_NEEDFIN) {
9600 tcp_state_change(tp, TCPS_FIN_WAIT_1);
9601 tp->t_flags &= ~TF_NEEDFIN;
9603 tcp_state_change(tp, TCPS_ESTABLISHED);
9604 TCP_PROBE5(accept__established, NULL, tp,
9605 mtod(m, const char *), tp, th);
9607 * TFO connections call cc_conn_init() during SYN
9608 * processing. Calling it again here for such connections
9609 * is not harmless as it would undo the snd_cwnd reduction
9610 * that occurs when a TFO SYN|ACK is retransmitted.
9612 if (!IS_FASTOPEN(tp->t_flags))
9613 rack_cc_conn_init(tp);
9616 * Account for the ACK of our SYN prior to
9617 * regular ACK processing below, except for
9618 * simultaneous SYN, which is handled later.
9620 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN))
9623 * If segment contains data or ACK, will call tcp_reass() later; if
9624 * not, do so now to pass queued data to user.
9626 if (tlen == 0 && (thflags & TH_FIN) == 0)
9627 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0,
9629 tp->snd_wl1 = th->th_seq - 1;
9630 /* For syn-recv we need to possibly update the rtt */
9631 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
9634 t = tcp_ts_getticks() - to->to_tsecr;
9635 if (!tp->t_rttlow || tp->t_rttlow > t)
9636 tp->t_rttlow = t;
9637 tcp_rack_xmit_timer(rack, t + 1, 1, (t * HPTS_USEC_IN_MSEC), 0, NULL, 2);
9638 tcp_rack_xmit_timer_commit(rack, tp);
9640 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
9643 if (tp->t_state == TCPS_FIN_WAIT_1) {
9644 /* We could have gone to FIN_WAIT_1 (or EST) above */
9646 * In FIN_WAIT_1 STATE in addition to the processing for the
9647 * ESTABLISHED state, if our FIN is now acknowledged then enter FIN_WAIT_2.
9650 if (ourfinisacked) {
9652 * If we can't receive any more data, then closing
9653 * user can proceed. Starting the timer is contrary
9654 * to the specification, but if we don't get a FIN
9655 * we'll hang forever.
9657 * XXXjl: we should release the tp also, and use a compressed state.
9660 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
9661 soisdisconnected(so);
9662 tcp_timer_activate(tp, TT_2MSL,
9663 (tcp_fast_finwait2_recycle ?
9664 tcp_finwait2_timeout :
9667 tcp_state_change(tp, TCPS_FIN_WAIT_2);
9670 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
9671 tiwin, thflags, nxt_pkt));
9675 * Return value of 1, the TCB is unlocked and most
9676 * likely gone; return value of 0, the TCP is still locked.
9680 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
9681 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
9682 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
9684 int32_t ret_val = 0;
9685 struct tcp_rack *rack;
9688 * Header prediction: check for the two common cases of a
9689 * uni-directional data xfer. If the packet has no control flags,
9690 * is in-sequence, the window didn't change and we're not
9691 * retransmitting, it's a candidate. If the length is zero and the
9692 * ack moved forward, we're the sender side of the xfer. Just free
9693 * the data acked & wake any higher level process that was blocked
9694 * waiting for space. If the length is non-zero and the ack didn't
9695 * move, we're the receiver side. If we're getting packets in-order
9696 * (the reassembly queue is empty), add the data to the socket
9697 * buffer and note that we need a delayed ack. Make sure that the
9698 * hidden state-flags are also off. Since we check for
9699 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN.
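/*
 * Illustrative sketch (not part of the stack): the header-prediction
 * gate below, condensed. Both fast paths require all four cheap tests
 * to pass. The flag values are the standard TCP header bits
 * (FIN=0x01, SYN=0x02, RST=0x04, ACK=0x10); inputs are stand-ins.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool
fastpath_candidate(bool has_sack_opt, int thflags, bool reass_q_empty,
    uint32_t th_seq, uint32_t rcv_nxt)
{
	/* SYN|FIN|RST|ACK must reduce to exactly ACK */
	return (!has_sack_opt &&
	    (thflags & (0x01 | 0x02 | 0x04 | 0x10)) == 0x10 &&
	    reass_q_empty && th_seq == rcv_nxt);
}
#endif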
9701 rack = (struct tcp_rack *)tp->t_fb_ptr;
9702 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) &&
9703 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) &&
9704 __predict_true(SEGQ_EMPTY(tp)) &&
9705 __predict_true(th->th_seq == tp->rcv_nxt)) {
9707 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen,
9708 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) {
9712 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen,
9713 tiwin, nxt_pkt, iptos)) {
9718 ctf_calc_rwin(so, tp);
9720 if ((thflags & TH_RST) ||
9721 (tp->t_fin_is_rst && (thflags & TH_FIN)))
9722 return (ctf_process_rst(m, th, so, tp));
9725 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
9726 * synchronized state.
9728 if (thflags & TH_SYN) {
9729 ctf_challenge_ack(m, th, tp, &ret_val);
9733 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
9734 * it's less than ts_recent, drop it.
9736 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
9737 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
9738 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
9741 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
9745 * If last ACK falls within this segment's sequence numbers, record
9746 * its timestamp. NOTE: 1) That the test incorporates suggestions
9747 * from the latest proposal of the tcplw@cray.com list (Braden
9748 * 1993/04/26). 2) That updating only on newer timestamps interferes
9749 * with our earlier PAWS tests, so this check should be solely
9750 * predicated on the sequence space of this segment. 3) That we
9751 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
9752 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
9753 * SEG.Len. This modified check allows us to overcome RFC1323's
9754 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
9755 * p.869. In such cases, we can still calculate the RTT correctly
9756 * when RCV.NXT == Last.ACK.Sent.
9758 if ((to->to_flags & TOF_TS) != 0 &&
9759 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
9760 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
9761 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
9762 tp->ts_recent_age = tcp_ts_getticks();
9763 tp->ts_recent = to->to_tsval;
9766 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
9767 * is on (half-synchronized state), then queue data for later
9768 * processing; else drop segment and return.
9770 if ((thflags & TH_ACK) == 0) {
9771 if (tp->t_flags & TF_NEEDSYN) {
9773 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
9774 tiwin, thflags, nxt_pkt));
9776 } else if (tp->t_flags & TF_ACKNOW) {
9777 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
9778 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
9781 ctf_do_drop(m, NULL);
9788 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
9791 if (sbavail(&so->so_snd)) {
9792 if (ctf_progress_timeout_check(tp, true)) {
9793 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
9794 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
9795 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
9799 /* State changes only happen in rack_process_data() */
9800 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
9801 tiwin, thflags, nxt_pkt));
9805 * Return value of 1, the TCB is unlocked and most
9806 * likely gone; return value of 0, the TCP is still locked.
9810 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
9811 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
9812 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
9814 int32_t ret_val = 0;
9816 ctf_calc_rwin(so, tp);
9817 if ((thflags & TH_RST) ||
9818 (tp->t_fin_is_rst && (thflags & TH_FIN)))
9819 return (ctf_process_rst(m, th, so, tp));
9821 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
9822 * synchronized state.
9824 if (thflags & TH_SYN) {
9825 ctf_challenge_ack(m, th, tp, &ret_val);
9829 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
9830 * it's less than ts_recent, drop it.
9832 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
9833 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
9834 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
9837 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
9841 * If last ACK falls within this segment's sequence numbers, record
9842 * its timestamp. NOTE: 1) That the test incorporates suggestions
9843 * from the latest proposal of the tcplw@cray.com list (Braden
9844 * 1993/04/26). 2) That updating only on newer timestamps interferes
9845 * with our earlier PAWS tests, so this check should be solely
9846 * predicated on the sequence space of this segment. 3) That we
9847 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
9848 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
9849 * SEG.Len. This modified check allows us to overcome RFC1323's
9850 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
9851 * p.869. In such cases, we can still calculate the RTT correctly
9852 * when RCV.NXT == Last.ACK.Sent.
9854 if ((to->to_flags & TOF_TS) != 0 &&
9855 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
9856 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
9857 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
9858 tp->ts_recent_age = tcp_ts_getticks();
9859 tp->ts_recent = to->to_tsval;
9862 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
9863 * is on (half-synchronized state), then queue data for later
9864 * processing; else drop segment and return.
9866 if ((thflags & TH_ACK) == 0) {
9867 if (tp->t_flags & TF_NEEDSYN) {
9868 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
9869 tiwin, thflags, nxt_pkt));
9871 } else if (tp->t_flags & TF_ACKNOW) {
9872 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
9873 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
9876 ctf_do_drop(m, NULL);
9883 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
9886 if (sbavail(&so->so_snd)) {
9887 if (ctf_progress_timeout_check(tp, true)) {
9888 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
9889 tp, tick, PROGRESS_DROP, __LINE__);
9890 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
9891 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
9895 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
9896 tiwin, thflags, nxt_pkt));
9900 rack_check_data_after_close(struct mbuf *m,
9901 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so)
9903 struct tcp_rack *rack;
9905 rack = (struct tcp_rack *)tp->t_fb_ptr;
9906 if (rack->rc_allow_data_af_clo == 0) {
9908 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
9909 /* tcp_close will kill the inp pre-log the Reset */
9910 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
9912 KMOD_TCPSTAT_INC(tcps_rcvafterclose);
9913 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen));
9916 if (sbavail(&so->so_snd) == 0)
9918 /* Ok we allow data that is ignored and a followup reset */
9919 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
9920 tp->rcv_nxt = th->th_seq + *tlen;
9921 tp->t_flags2 |= TF2_DROP_AF_DATA;
9922 rack->r_wanted_output = 1;
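/*
 * Illustrative sketch (not part of the stack): the policy this function
 * implements, as a decision table. Names are local stand-ins; the real
 * code acts on the tcpcb directly instead of returning a verdict.
 */
#if 0
#include <stdbool.h>

enum af_close_action { AFC_RESET_NOW, AFC_CLOSE, AFC_SWALLOW_DATA };

static enum af_close_action
data_after_close_policy(bool allow_data_af_clo, bool snd_buf_empty)
{
	if (!allow_data_af_clo)
		return (AFC_RESET_NOW);	  /* count, log, and RST the peer */
	if (snd_buf_empty)
		return (AFC_CLOSE);	  /* nothing left to send: close */
	return (AFC_SWALLOW_DATA);	  /* ignore data, reset after drain */
}
#endif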
9928 * Return value of 1, the TCB is unlocked and most
9929 * likely gone; return value of 0, the TCP is still locked.
9933 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
9934 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
9935 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
9937 int32_t ret_val = 0;
9938 int32_t ourfinisacked = 0;
9940 ctf_calc_rwin(so, tp);
9942 if ((thflags & TH_RST) ||
9943 (tp->t_fin_is_rst && (thflags & TH_FIN)))
9944 return (ctf_process_rst(m, th, so, tp));
9946 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
9947 * synchronized state.
9949 if (thflags & TH_SYN) {
9950 ctf_challenge_ack(m, th, tp, &ret_val);
9954 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
9955 * it's less than ts_recent, drop it.
9957 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
9958 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
9959 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
9962 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
9966 * If new data are received on a connection after the user processes
9967 * are gone, then RST the other end.
9969 if ((so->so_state & SS_NOFDREF) && tlen) {
9970 if (rack_check_data_after_close(m, tp, &tlen, th, so))
9974 * If last ACK falls within this segment's sequence numbers, record
9975 * its timestamp. NOTE: 1) That the test incorporates suggestions
9976 * from the latest proposal of the tcplw@cray.com list (Braden
9977 * 1993/04/26). 2) That updating only on newer timestamps interferes
9978 * with our earlier PAWS tests, so this check should be solely
9979 * predicated on the sequence space of this segment. 3) That we
9980 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
9981 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
9982 * SEG.Len. This modified check allows us to overcome RFC1323's
9983 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
9984 * p.869. In such cases, we can still calculate the RTT correctly
9985 * when RCV.NXT == Last.ACK.Sent.
9987 if ((to->to_flags & TOF_TS) != 0 &&
9988 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
9989 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
9990 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
9991 tp->ts_recent_age = tcp_ts_getticks();
9992 tp->ts_recent = to->to_tsval;
9995 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
9996 * is on (half-synchronized state), then queue data for later
9997 * processing; else drop segment and return.
9999 if ((thflags & TH_ACK) == 0) {
10000 if (tp->t_flags & TF_NEEDSYN) {
10001 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
10002 tiwin, thflags, nxt_pkt));
10003 } else if (tp->t_flags & TF_ACKNOW) {
10004 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
10005 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
10008 ctf_do_drop(m, NULL);
10015 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
10018 if (ourfinisacked) {
10020 * If we can't receive any more data, then closing user can
10021 * proceed. Starting the timer is contrary to the
10022 * specification, but if we don't get a FIN we'll hang forever.
10025 * XXXjl: we should release the tp also, and use a
10026 * compressed state.
10028 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
10029 soisdisconnected(so);
10030 tcp_timer_activate(tp, TT_2MSL,
10031 (tcp_fast_finwait2_recycle ?
10032 tcp_finwait2_timeout :
10035 tcp_state_change(tp, TCPS_FIN_WAIT_2);
10037 if (sbavail(&so->so_snd)) {
10038 if (ctf_progress_timeout_check(tp, true)) {
10039 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
10040 tp, tick, PROGRESS_DROP, __LINE__);
10041 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
10042 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10046 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
10047 tiwin, thflags, nxt_pkt));
10051 * Return value of 1, the TCB is unlocked and most
10052 * likely gone; return value of 0, the TCP is still locked.
10056 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
10057 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10058 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
10060 int32_t ret_val = 0;
10061 int32_t ourfinisacked = 0;
10063 ctf_calc_rwin(so, tp);
10065 if ((thflags & TH_RST) ||
10066 (tp->t_fin_is_rst && (thflags & TH_FIN)))
10067 return (ctf_process_rst(m, th, so, tp));
10069 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
10070 * synchronized state.
10072 if (thflags & TH_SYN) {
10073 ctf_challenge_ack(m, th, tp, &ret_val);
10077 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
10078 * it's less than ts_recent, drop it.
10080 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
10081 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
10082 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
10085 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
10089 * If new data are received on a connection after the user processes
10090 * are gone, then RST the other end.
10092 if ((so->so_state & SS_NOFDREF) && tlen) {
10093 if (rack_check_data_after_close(m, tp, &tlen, th, so))
10097 * If last ACK falls within this segment's sequence numbers, record
10098 * its timestamp. NOTE: 1) That the test incorporates suggestions
10099 * from the latest proposal of the tcplw@cray.com list (Braden
10100 * 1993/04/26). 2) That updating only on newer timestamps interferes
10101 * with our earlier PAWS tests, so this check should be solely
10102 * predicated on the sequence space of this segment. 3) That we
10103 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
10104 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
10105 * SEG.Len. This modified check allows us to overcome RFC1323's
10106 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
10107 * p.869. In such cases, we can still calculate the RTT correctly
10108 * when RCV.NXT == Last.ACK.Sent.
10110 if ((to->to_flags & TOF_TS) != 0 &&
10111 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
10112 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
10113 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
10114 tp->ts_recent_age = tcp_ts_getticks();
10115 tp->ts_recent = to->to_tsval;
10118 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
10119 * is on (half-synchronized state), then queue data for later
10120 * processing; else drop segment and return.
10122 if ((thflags & TH_ACK) == 0) {
10123 if (tp->t_flags & TF_NEEDSYN) {
10124 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
10125 tiwin, thflags, nxt_pkt));
10126 } else if (tp->t_flags & TF_ACKNOW) {
10127 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
10128 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
10131 ctf_do_drop(m, NULL);
10138 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
10141 if (ourfinisacked) {
10146 if (sbavail(&so->so_snd)) {
10147 if (ctf_progress_timeout_check(tp, true)) {
10148 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
10149 tp, tick, PROGRESS_DROP, __LINE__);
10150 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
10151 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10155 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
10156 tiwin, thflags, nxt_pkt));
10160 * Return value of 1, the TCB is unlocked and most
10161 * likely gone; return value of 0, the TCP is still locked.
10165 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
10166 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10167 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
10169 int32_t ret_val = 0;
10170 int32_t ourfinisacked = 0;
10172 ctf_calc_rwin(so, tp);
10174 if ((thflags & TH_RST) ||
10175 (tp->t_fin_is_rst && (thflags & TH_FIN)))
10176 return (ctf_process_rst(m, th, so, tp));
10178 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
10179 * synchronized state.
10181 if (thflags & TH_SYN) {
10182 ctf_challenge_ack(m, th, tp, &ret_val);
10186 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
10187 * it's less than ts_recent, drop it.
10189 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
10190 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
10191 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
10194 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
10198 * If new data are received on a connection after the user processes
10199 * are gone, then RST the other end.
10201 if ((so->so_state & SS_NOFDREF) && tlen) {
10202 if (rack_check_data_after_close(m, tp, &tlen, th, so))
10206 * If last ACK falls within this segment's sequence numbers, record
10207 * its timestamp. NOTE: 1) That the test incorporates suggestions
10208 * from the latest proposal of the tcplw@cray.com list (Braden
10209 * 1993/04/26). 2) That updating only on newer timestamps interferes
10210 * with our earlier PAWS tests, so this check should be solely
10211 * predicated on the sequence space of this segment. 3) That we
10212 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
10213 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
10214 * SEG.Len. This modified check allows us to overcome RFC1323's
10215 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
10216 * p.869. In such cases, we can still calculate the RTT correctly
10217 * when RCV.NXT == Last.ACK.Sent.
10219 if ((to->to_flags & TOF_TS) != 0 &&
10220 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
10221 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
10222 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
10223 tp->ts_recent_age = tcp_ts_getticks();
10224 tp->ts_recent = to->to_tsval;
10227 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
10228 * is on (half-synchronized state), then queue data for later
10229 * processing; else drop segment and return.
10231 if ((thflags & TH_ACK) == 0) {
10232 if (tp->t_flags & TF_NEEDSYN) {
10233 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
10234 tiwin, thflags, nxt_pkt));
10235 } else if (tp->t_flags & TF_ACKNOW) {
10236 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
10237 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
10240 ctf_do_drop(m, NULL);
10245 * case TCPS_LAST_ACK: Ack processing.
10247 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
10250 if (ourfinisacked) {
10251 tp = tcp_close(tp);
10252 ctf_do_drop(m, tp);
10255 if (sbavail(&so->so_snd)) {
10256 if (ctf_progress_timeout_check(tp, true)) {
10257 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
10258 tp, tick, PROGRESS_DROP, __LINE__);
10259 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
10260 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10264 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
10265 tiwin, thflags, nxt_pkt));
10270 * Return value of 1, the TCB is unlocked and most
10271 * likely gone; return value of 0, the TCP is still locked.
10275 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
10276 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10277 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
10279 int32_t ret_val = 0;
10280 int32_t ourfinisacked = 0;
10282 ctf_calc_rwin(so, tp);
10284 /* Reset receive buffer auto scaling when not in bulk receive mode. */
10285 if ((thflags & TH_RST) ||
10286 (tp->t_fin_is_rst && (thflags & TH_FIN)))
10287 return (ctf_process_rst(m, th, so, tp));
10289 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
10290 * synchronized state.
10292 if (thflags & TH_SYN) {
10293 ctf_challenge_ack(m, th, tp, &ret_val);
10297 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
10298 * it's less than ts_recent, drop it.
10300 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
10301 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
10302 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
10305 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
10309 * If new data are received on a connection after the user processes
10310 * are gone, then RST the other end.
10312 if ((so->so_state & SS_NOFDREF) &&
10314 if (rack_check_data_after_close(m, tp, &tlen, th, so))
10318 * If last ACK falls within this segment's sequence numbers, record
10319 * its timestamp. NOTE: 1) That the test incorporates suggestions
10320 * from the latest proposal of the tcplw@cray.com list (Braden
10321 * 1993/04/26). 2) That updating only on newer timestamps interferes
10322 * with our earlier PAWS tests, so this check should be solely
10323 * predicated on the sequence space of this segment. 3) That we
10324 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
10325 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
10326 * SEG.Len. This modified check allows us to overcome RFC1323's
10327 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
10328 * p.869. In such cases, we can still calculate the RTT correctly
10329 * when RCV.NXT == Last.ACK.Sent.
10331 if ((to->to_flags & TOF_TS) != 0 &&
10332 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
10333 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
10334 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
10335 tp->ts_recent_age = tcp_ts_getticks();
10336 tp->ts_recent = to->to_tsval;
10339 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
10340 * is on (half-synchronized state), then queue data for later
10341 * processing; else drop segment and return.
10343 if ((thflags & TH_ACK) == 0) {
10344 if (tp->t_flags & TF_NEEDSYN) {
10345 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
10346 tiwin, thflags, nxt_pkt));
10347 } else if (tp->t_flags & TF_ACKNOW) {
10348 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
10349 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
10352 ctf_do_drop(m, NULL);
10359 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
10362 if (sbavail(&so->so_snd)) {
10363 if (ctf_progress_timeout_check(tp, true)) {
10364 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
10365 tp, tick, PROGRESS_DROP, __LINE__);
10366 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
10367 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10371 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
10372 tiwin, thflags, nxt_pkt));
10376 rack_clear_rate_sample(struct tcp_rack *rack)
10378 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY;
10379 rack->r_ctl.rack_rs.rs_rtt_cnt = 0;
10380 rack->r_ctl.rack_rs.rs_rtt_tot = 0;
10384 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line)
10386 uint64_t bw_est, rate_wanted;
10387 uint32_t tls_seg = 0;
10391 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs;
10393 if (rack->rc_inp->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) {
10394 tls_seg = ctf_get_opt_tls_size(rack->rc_inp->inp_socket, rack->rc_tp->snd_wnd);
10395 if (tls_seg != rack->r_ctl.rc_pace_min_segs)
10397 rack->r_ctl.rc_pace_min_segs = tls_seg;
10401 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs)
10403 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp);
10405 if (rack->use_fixed_rate || rack->rc_force_max_seg) {
10406 if (user_max != rack->r_ctl.rc_pace_max_segs)
10409 if (rack->rc_force_max_seg) {
10410 rack->r_ctl.rc_pace_max_segs = user_max;
10411 } else if (rack->use_fixed_rate) {
10412 bw_est = rack_get_bw(rack);
10413 if ((rack->r_ctl.crte == NULL) ||
10414 (bw_est != rack->r_ctl.crte->rate)) {
10415 rack->r_ctl.rc_pace_max_segs = user_max;
10417 /* We are pacing right at the hardware rate */
10420 segsiz = min(ctf_fixed_maxseg(tp),
10421 rack->r_ctl.rc_pace_min_segs);
10422 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(
10424 rack->r_ctl.crte, NULL);
10426 } else if (rack->rc_always_pace) {
10427 if (rack->r_ctl.gp_bw ||
10428 #ifdef NETFLIX_PEAKRATE
10429 rack->rc_tp->t_maxpeakrate ||
10431 rack->r_ctl.init_rate) {
10432 /* We have a rate of some sort set */
10435 bw_est = rack_get_bw(rack);
10436 orig = rack->r_ctl.rc_pace_max_segs;
10437 rate_wanted = rack_get_output_bw(rack, bw_est, NULL);
10439 /* We have something */
10440 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack,
10442 ctf_fixed_maxseg(rack->rc_tp));
10444 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs;
10445 if (orig != rack->r_ctl.rc_pace_max_segs)
10447 } else if ((rack->r_ctl.gp_bw == 0) &&
10448 (rack->r_ctl.rc_pace_max_segs == 0)) {
10450 * If we have nothing limit us to bursting
10451 * out IW sized pieces.
10454 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack);
10457 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) {
10459 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES;
10464 if (tls_seg != 0) {
10465 orig = rack->r_ctl.rc_pace_max_segs;
10466 if (rack_hw_tls_max_seg > 1) {
10467 rack->r_ctl.rc_pace_max_segs /= tls_seg;
10468 if (rack_hw_tls_max_seg > rack->r_ctl.rc_pace_max_segs)
10469 rack->r_ctl.rc_pace_max_segs = rack_hw_tls_max_seg;
10471 rack->r_ctl.rc_pace_max_segs = 1;
10473 if (rack->r_ctl.rc_pace_max_segs == 0)
10474 rack->r_ctl.rc_pace_max_segs = 1;
10475 rack->r_ctl.rc_pace_max_segs *= tls_seg;
10476 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) {
10477 /* We can't go over the max bytes (usually 64k) */
10478 rack->r_ctl.rc_pace_max_segs = ((PACE_MAX_IP_BYTES / tls_seg) * tls_seg);
10480 if (orig != rack->r_ctl.rc_pace_max_segs)
10485 rack_log_type_hrdwtso(tp, rack, tls_seg, rack->rc_inp->inp_socket->so_snd.sb_flags, line, 2);
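/*
 * Illustrative sketch (not part of the stack): the TLS rounding done
 * above. The pacing burst is kept a whole multiple of the hardware TLS
 * record size and clamped to PACE_MAX_IP_BYTES (usually 64K). Names are
 * local stand-ins for the r_ctl fields used by the stack.
 */
#if 0
#include <stdint.h>

static uint32_t
round_burst_to_tls(uint32_t burst_bytes, uint32_t tls_seg,
    uint32_t max_ip_bytes)
{
	uint32_t recs = burst_bytes / tls_seg;	/* whole TLS records */

	if (recs == 0)
		recs = 1;			/* always at least one */
	if (recs * tls_seg > max_ip_bytes)
		recs = max_ip_bytes / tls_seg;	/* honor the IP ceiling */
	return (recs * tls_seg);
}
#endif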
10489 rack_init(struct tcpcb *tp)
10491 struct tcp_rack *rack = NULL;
10492 struct rack_sendmap *insret;
10493 uint32_t iwin, snt, us_cts;
10495 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT);
10496 if (tp->t_fb_ptr == NULL) {
10498 * We need to allocate memory but can't. The INP and INP_INFO
10499 * locks are held and they are recursive (this happens during
10500 * setup), so a scheme to drop the locks fails. :(
10505 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack));
10507 rack = (struct tcp_rack *)tp->t_fb_ptr;
10508 RB_INIT(&rack->r_ctl.rc_mtree);
10509 TAILQ_INIT(&rack->r_ctl.rc_free);
10510 TAILQ_INIT(&rack->r_ctl.rc_tmap);
10513 rack->rc_inp = tp->t_inpcb;
10515 /* Probably not needed but lets be sure */
10516 rack_clear_rate_sample(rack);
10517 rack->r_ctl.rc_reorder_fade = rack_reorder_fade;
10518 rack->rc_allow_data_af_clo = rack_ignore_data_after_close;
10519 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh;
10521 rack->use_rack_rr = 1;
10522 if (V_tcp_delack_enabled)
10523 tp->t_delayed_ack = 1;
10525 tp->t_delayed_ack = 0;
10526 if (rack_enable_shared_cwnd)
10527 rack->rack_enable_scwnd = 1;
10528 rack->rc_user_set_max_segs = rack_hptsi_segments;
10529 rack->rc_force_max_seg = 0;
10530 if (rack_use_imac_dack)
10531 rack->rc_dack_mode = 1;
10532 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh;
10533 rack->r_ctl.rc_pkt_delay = rack_pkt_delay;
10534 rack->r_ctl.rc_prop_reduce = rack_use_proportional_reduce;
10535 rack->r_ctl.rc_prop_rate = rack_proportional_rate;
10536 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp;
10537 rack->r_ctl.rc_early_recovery = rack_early_recovery;
10538 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff;
10539 rack->r_ctl.rc_highest_us_rtt = 0;
10540 if (rack_disable_prr)
10541 rack->rack_no_prr = 1;
10542 if (rack_gp_no_rec_chg)
10543 rack->rc_gp_no_rec_chg = 1;
10544 rack->rc_always_pace = rack_pace_every_seg;
10545 if (rack_enable_mqueue_for_nonpaced)
10546 rack->r_mbuf_queue = 1;
10548 rack->r_mbuf_queue = 0;
10549 if (rack->r_mbuf_queue || rack->rc_always_pace)
10550 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
10552 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
10553 rack_set_pace_segments(tp, rack, __LINE__);
10554 if (rack_limits_scwnd)
10555 rack->r_limit_scw = 1;
10557 rack->r_limit_scw = 0;
10558 rack->r_ctl.rc_high_rwnd = tp->snd_wnd;
10559 rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
10560 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method;
10561 rack->rack_tlp_threshold_use = rack_tlp_threshold_use;
10562 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr;
10563 rack->r_ctl.rc_min_to = rack_min_to;
10564 microuptime(&rack->r_ctl.act_rcv_time);
10565 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
10566 rack->r_running_late = 0;
10567 rack->r_running_early = 0;
10568 rack->rc_init_win = rack_default_init_window;
10569 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss;
10570 if (rack_do_dyn_mul) {
10571 /* When dynamic adjustment is on CA needs to start at 100% */
10572 rack->rc_gp_dyn_mul = 1;
10573 if (rack_do_dyn_mul >= 100)
10574 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
10576 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
10577 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec;
10578 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
10579 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
10580 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN,
10581 rack_probertt_filter_life);
10582 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
10583 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
10584 rack->r_ctl.rc_time_of_last_probertt = us_cts;
10585 rack->r_ctl.rc_time_probertt_starts = 0;
10586 /* Do we force on detection? */
10587 #ifdef NETFLIX_EXP_DETECTION
10588 if (tcp_force_detection)
10589 rack->do_detection = 1;
10592 rack->do_detection = 0;
10593 if (rack_non_rxt_use_cr)
10594 rack->rack_rec_nonrxt_use_cr = 1;
10595 if (tp->snd_una != tp->snd_max) {
10596 /* Create a send map for the current outstanding data */
10597 struct rack_sendmap *rsm;
10599 rsm = rack_alloc(rack);
10601 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
10602 tp->t_fb_ptr = NULL;
10605 rsm->r_flags = RACK_OVERMAX;
10606 rsm->r_tim_lastsent[0] = rack->r_ctl.rc_tlp_rxt_last_time;
10607 rsm->r_rtr_cnt = 1;
10608 rsm->r_rtr_bytes = 0;
10609 rsm->r_start = tp->snd_una;
10610 rsm->r_end = tp->snd_max;
10611 rsm->usec_orig_send = us_cts;
10613 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
10615 if (insret != NULL) {
10616 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p",
10617 insret, rack, rsm);
10620 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
10621 rsm->r_in_tmap = 1;
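/*
 * Illustrative sketch (not part of the stack): what the block above
 * constructs, in miniature. One tracking record spans all currently
 * unacknowledged data so RACK can account for a taken-over connection;
 * the field names are stand-ins for the rack_sendmap members.
 */
#if 0
#include <stdint.h>

struct seg_track {
	uint32_t start;		/* r_start: snd_una */
	uint32_t end;		/* r_end:   snd_max */
	uint8_t	 rtr_cnt;	/* transmissions so far */
};

static void
track_outstanding(struct seg_track *st, uint32_t snd_una, uint32_t snd_max)
{
	st->start = snd_una;
	st->end = snd_max;
	st->rtr_cnt = 1;	/* the data has been sent exactly once */
}
#endif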
10623 /* Cancel the GP measurement in progress */
10624 tp->t_flags &= ~TF_GPUTINPROG;
10625 if (SEQ_GT(tp->snd_max, tp->iss))
10626 snt = tp->snd_max - tp->iss;
10629 iwin = rc_init_window(rack);
10631 /* We are not past the initial window
10632 * so we need to make sure cwnd is correct.
10635 if (tp->snd_cwnd < iwin)
10636 tp->snd_cwnd = iwin;
10638 * If we are within the initial window
10639 * we want ssthresh to be unlimited. Setting
10640 * it to the rwnd (which the default stack does
10641 * and older racks) is not really a good idea
10642 * since we want to be in SS and grow both the
10643 * cwnd and the rwnd (via dynamic rwnd growth). If
10644 * we set it to the rwnd then as the peer grows its
10645 * rwnd we will be stuck in CA and never hit SS.
10647 * It's far better to raise it up high (this takes the
10648 * risk that there has been a loss already, probably
10649 * we should have an indicator in all stacks of loss
10650 * but we don't), but considering the normal use this
10651 * is a risk worth taking. The consequences of not
10652 * hitting SS are far worse than going one more time
10653 * into it early on (before we have sent even an IW).
10654 * It is highly unlikely that we will have had a loss
10655 * before getting the IW out.
10657 tp->snd_ssthresh = 0xffffffff;
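/*
 * Illustrative sketch (not part of the stack): the takeover logic above
 * in miniature. Grow cwnd to at least the initial window and leave
 * ssthresh effectively unlimited so the connection (re)enters slow start
 * rather than congestion avoidance.
 */
#if 0
#include <stdint.h>

static void
takeover_cwnd_ssthresh(uint32_t *cwnd, uint32_t *ssthresh, uint32_t iwin)
{
	if (*cwnd < iwin)
		*cwnd = iwin;		/* cover the initial window */
	*ssthresh = 0xffffffffU;	/* "unlimited": stay in slow start */
}
#endif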
10659 rack_stop_all_timers(tp);
10660 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), 0, 0, 0);
10661 rack_log_rtt_shrinks(rack, us_cts, 0,
10662 __LINE__, RACK_RTTS_INIT);
10667 rack_handoff_ok(struct tcpcb *tp)
10669 if ((tp->t_state == TCPS_CLOSED) ||
10670 (tp->t_state == TCPS_LISTEN)) {
10671 /* Sure, no problem, though it may not stick */
10674 if ((tp->t_state == TCPS_SYN_SENT) ||
10675 (tp->t_state == TCPS_SYN_RECEIVED)) {
10677 * We really don't know if you support sack; you have to get to ESTAB or beyond to tell.
10682 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){
10686 * If we reach here we don't do SACK on this connection so we can never do rack.
10693 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
10695 if (tp->t_fb_ptr) {
10696 struct tcp_rack *rack;
10697 struct rack_sendmap *rsm, *nrsm, *rm;
10699 rack = (struct tcp_rack *)tp->t_fb_ptr;
10700 #ifdef NETFLIX_SHARED_CWND
10701 if (rack->r_ctl.rc_scw) {
10704 if (rack->r_limit_scw)
10705 limit = max(1, rack->r_ctl.rc_lowest_us_rtt);
10708 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw,
10709 rack->r_ctl.rc_scw_index,
10711 rack->r_ctl.rc_scw = NULL;
10714 /* rack does not use force data but other stacks may clear it */
10715 tp->t_flags &= ~TF_FORCEDATA;
10717 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
10718 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
10719 tp->t_inpcb->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
10721 #ifdef TCP_BLACKBOX
10722 tcp_log_flowend(tp);
10724 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) {
10725 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
10728 panic("At fini, rack:%p rsm:%p rm:%p",
10732 uma_zfree(rack_zone, rsm);
10734 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
10736 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
10737 uma_zfree(rack_zone, rsm);
10738 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
10740 rack->rc_free_cnt = 0;
10741 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
10742 tp->t_fb_ptr = NULL;
10744 /* Cancel the GP measurement in progress */
10745 tp->t_flags &= ~TF_GPUTINPROG;
10746 /* Make sure snd_nxt is correctly set */
10747 tp->snd_nxt = tp->snd_max;
10752 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack)
10754 switch (tp->t_state) {
10755 case TCPS_SYN_SENT:
10756 rack->r_state = TCPS_SYN_SENT;
10757 rack->r_substate = rack_do_syn_sent;
10759 case TCPS_SYN_RECEIVED:
10760 rack->r_state = TCPS_SYN_RECEIVED;
10761 rack->r_substate = rack_do_syn_recv;
10763 case TCPS_ESTABLISHED:
10764 rack_set_pace_segments(tp, rack, __LINE__);
10765 rack->r_state = TCPS_ESTABLISHED;
10766 rack->r_substate = rack_do_established;
10768 case TCPS_CLOSE_WAIT:
10769 rack->r_state = TCPS_CLOSE_WAIT;
10770 rack->r_substate = rack_do_close_wait;
10772 case TCPS_FIN_WAIT_1:
10773 rack->r_state = TCPS_FIN_WAIT_1;
10774 rack->r_substate = rack_do_fin_wait_1;
10777 rack->r_state = TCPS_CLOSING;
10778 rack->r_substate = rack_do_closing;
10780 case TCPS_LAST_ACK:
10781 rack->r_state = TCPS_LAST_ACK;
10782 rack->r_substate = rack_do_lastack;
10784 case TCPS_FIN_WAIT_2:
10785 rack->r_state = TCPS_FIN_WAIT_2;
10786 rack->r_substate = rack_do_fin_wait_2;
10790 case TCPS_TIME_WAIT:
10798 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
10801 * We received an ack, and then did not
10802 * call send, or were bounced out because the
10803 * hpts was running. Now a timer is up as well; is
10804 * it the right timer?
10806 struct rack_sendmap *rsm;
10809 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
10810 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT))
10812 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
10813 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) &&
10814 (tmr_up == PACE_TMR_RXT)) {
10815 /* Should be an RXT */
10819 /* Nothing outstanding? */
10820 if (tp->t_flags & TF_DELACK) {
10821 if (tmr_up == PACE_TMR_DELACK)
10822 /* We are supposed to have delayed ack up and we do */
10824 } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) {
10826 * if we hit enobufs then we would expect the possibility
10827 * of nothing outstanding and the RXT up (and the hptsi timer).
10830 } else if (((V_tcp_always_keepalive ||
10831 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
10832 (tp->t_state <= TCPS_CLOSING)) &&
10833 (tmr_up == PACE_TMR_KEEP) &&
10834 (tp->snd_max == tp->snd_una)) {
10835 /* We should have keep alive up and we do */
10839 if (SEQ_GT(tp->snd_max, tp->snd_una) &&
10840 ((tmr_up == PACE_TMR_TLP) ||
10841 (tmr_up == PACE_TMR_RACK) ||
10842 (tmr_up == PACE_TMR_RXT))) {
10844 * Either a Rack, TLP or RXT is fine if we
10845 * have outstanding data.
10848 } else if (tmr_up == PACE_TMR_DELACK) {
10850 * If the delayed ack was going to go off
10851 * before the rtx/tlp/rack timer were going to
10852 * expire, then that would be the timer in control.
10853 * Note we don't check the time here, trusting the code is correct.
10859 * Ok the timer originally started is not what we want now.
10860 * We will force the hpts to be stopped if any, and restart
10861 * with the slot set to what was in the saved slot.
10863 if (rack->rc_inp->inp_in_hpts) {
10864 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
10867 us_cts = tcp_get_usecs(NULL);
10868 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
10870 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
10872 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
10874 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT);
10876 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
10877 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), 0, 0, 0);
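/*
 * Illustrative sketch (not part of the stack): the audit above reduced
 * to the question "is the armed timer plausible for the current
 * situation?". The enum values stand in for the stack's PACE_TMR_*
 * identifiers; all inputs are simplified stand-ins.
 */
#if 0
#include <stdbool.h>

enum tmr { TMR_NONE, TMR_PERSIST, TMR_DELACK, TMR_RXT, TMR_TLP,
	   TMR_RACK, TMR_KEEP };

static bool
timer_plausible(enum tmr up, bool in_persist, bool delack_pending,
    bool data_out, bool keepalive_ok)
{
	if (in_persist)
		return (up == TMR_PERSIST);
	if (delack_pending && up == TMR_DELACK)
		return (true);
	if (data_out)
		return (up == TMR_TLP || up == TMR_RACK || up == TMR_RXT);
	return (keepalive_ok && up == TMR_KEEP);
}
#endif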
10881 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
10882 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos,
10883 int32_t nxt_pkt, struct timeval *tv)
10885 int32_t thflags, retval, did_out = 0;
10886 int32_t way_out = 0;
10889 struct timespec ts;
10891 struct tcp_rack *rack;
10892 struct rack_sendmap *rsm;
10893 int32_t prev_state = 0;
10896 * tv passed from common code is from either M_TSTMP_LRO or
10897 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. The
10898 * rack_pacing stack assumes tv always refers to 'now', so we overwrite
10899 * tv here to guarantee that.
10901 if (m->m_flags & M_TSTMP_LRO)
10904 cts = tcp_tv_to_mssectick(tv);
10905 rack = (struct tcp_rack *)tp->t_fb_ptr;
10907 if ((m->m_flags & M_TSTMP) ||
10908 (m->m_flags & M_TSTMP_LRO)) {
10909 mbuf_tstmp2timespec(m, &ts);
10910 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
10911 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
10913 rack->r_ctl.act_rcv_time = *tv;
10914 kern_prefetch(rack, &prev_state);
10916 thflags = th->th_flags;
10918 NET_EPOCH_ASSERT();
10919 INP_WLOCK_ASSERT(tp->t_inpcb);
10920 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
10922 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
10924 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
10925 union tcp_log_stackspecific log;
10926 struct timeval ltv;
10927 #ifdef NETFLIX_HTTP_LOGGING
10928 struct http_sendfile_track *http_req;
10930 if (SEQ_GT(th->th_ack, tp->snd_una)) {
10931 http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1));
10933 http_req = tcp_http_find_req_for_seq(tp, th->th_ack);
10936 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
10937 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
10938 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
10939 if (rack->rack_no_prr == 0)
10940 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
10942 log.u_bbr.flex1 = 0;
10943 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
10944 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
10945 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
10946 log.u_bbr.flex3 = m->m_flags;
10947 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
10948 if (m->m_flags & M_TSTMP) {
10949 /* Record the hardware timestamp if present */
10950 mbuf_tstmp2timespec(m, &ts);
10951 ltv.tv_sec = ts.tv_sec;
10952 ltv.tv_usec = ts.tv_nsec / 1000;
10953 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v);
10954 } else if (m->m_flags & M_TSTMP_LRO) {
10955 /* Record the LRO arrival timestamp */
10956 mbuf_tstmp2timespec(m, &ts);
10957 ltv.tv_sec = ts.tv_sec;
10958 ltv.tv_usec = ts.tv_nsec / 1000;
10959 log.u_bbr.flex5 = tcp_tv_to_usectick(<v);
10961 log.u_bbr.timeStamp = tcp_get_usecs(<v);
10962 /* Log the rcv time */
10963 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
10964 #ifdef NETFLIX_HTTP_LOGGING
10965 log.u_bbr.applimited = tp->t_http_closed;
10966 log.u_bbr.applimited <<= 8;
10967 log.u_bbr.applimited |= tp->t_http_open;
10968 log.u_bbr.applimited <<= 8;
10969 log.u_bbr.applimited |= tp->t_http_req;
10971 /* Copy out any client req info */
10973 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
10975 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
10976 log.u_bbr.rttProp = http_req->timestamp;
10977 log.u_bbr.cur_del_rate = http_req->start;
10978 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
10979 log.u_bbr.flex8 |= 1;
10981 log.u_bbr.flex8 |= 2;
10982 log.u_bbr.bw_inuse = http_req->end;
10984 log.u_bbr.flex6 = http_req->start_seq;
10985 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
10986 log.u_bbr.flex8 |= 4;
10987 log.u_bbr.epoch = http_req->end_seq;
10991 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
10992 tlen, &log, true, <v);
10994 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
10997 goto done_with_input;
11000 * If a segment with the ACK-bit set arrives in the SYN-SENT state
11001 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
11003 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
11004 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
11005 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
11006 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11010 * Segment received on connection. Reset idle time and keep-alive
11011 * timer. XXX: This should be done after segment validation to
11012 * ignore broken/spoofed segs.
11014 if (tp->t_idle_reduce &&
11015 (tp->snd_max == tp->snd_una) &&
11016 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
11017 counter_u64_add(rack_input_idle_reduces, 1);
11018 rack_cc_after_idle(rack, tp);
11020 tp->t_rcvtime = ticks;
11022 * Unscale the window into a 32-bit value. For the SYN_SENT state
11023 * the scale is zero.
11025 tiwin = th->th_win << tp->snd_scale;
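/*
 * Illustrative sketch (not part of the stack): window unscaling in
 * isolation. The 16-bit on-the-wire window is shifted by the scale
 * agreed at handshake time; a SYN's window is never scaled, hence the
 * shift is zero while in SYN_SENT.
 */
#if 0
#include <stdint.h>

static uint32_t
unscale_window(uint16_t th_win, uint8_t snd_scale)
{
	return ((uint32_t)th_win << snd_scale);
}
#endif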
11027 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
11029 if (tiwin > rack->r_ctl.rc_high_rwnd)
11030 rack->r_ctl.rc_high_rwnd = tiwin;
11032 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move
11033 * this to occur after we've validated the segment.
11035 if (tp->t_flags2 & TF2_ECN_PERMIT) {
11036 if (thflags & TH_CWR) {
11037 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
11038 tp->t_flags |= TF_ACKNOW;
11040 switch (iptos & IPTOS_ECN_MASK) {
11042 tp->t_flags2 |= TF2_ECN_SND_ECE;
11043 KMOD_TCPSTAT_INC(tcps_ecn_ce);
11045 case IPTOS_ECN_ECT0:
11046 KMOD_TCPSTAT_INC(tcps_ecn_ect0);
11048 case IPTOS_ECN_ECT1:
11049 KMOD_TCPSTAT_INC(tcps_ecn_ect1);
11053 /* Process a packet differently from RFC3168. */
11054 cc_ecnpkt_handler(tp, th, iptos);
11056 /* Congestion experienced. */
11057 if (thflags & TH_ECE) {
11058 rack_cong_signal(tp, th, CC_ECN);
11062 * Parse options on any incoming segment.
11064 tcp_dooptions(&to, (u_char *)(th + 1),
11065 (th->th_off << 2) - sizeof(struct tcphdr),
11066 (thflags & TH_SYN) ? TO_SYN : 0);
11069 * If echoed timestamp is later than the current time, fall back to
11070 * non RFC1323 RTT calculation. Normalize timestamp if syncookies
11071 * were used when this connection was established.
11073 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
11074 to.to_tsecr -= tp->ts_offset;
11075 if (TSTMP_GT(to.to_tsecr, cts))
11080 * If its the first time in we need to take care of options and
11081 * verify we can do SACK for rack!
11083 if (rack->r_state == 0) {
11084 /* Should be init'd by rack_init() */
11085 KASSERT(rack->rc_inp != NULL,
11086 ("%s: rack->rc_inp unexpectedly NULL", __func__));
11087 if (rack->rc_inp == NULL) {
11088 rack->rc_inp = tp->t_inpcb;
11092 * Process options only when we get SYN/ACK back. The SYN
11093 * case for incoming connections is handled in tcp_syncache.
11094 * According to RFC1323 the window field in a SYN (i.e., a
11095 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX
11096 * this is traditional behavior, may need to be cleaned up.
11098 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
11099 /* Handle parallel SYN for ECN */
11100 if (!(thflags & TH_ACK) &&
11101 ((thflags & (TH_CWR | TH_ECE)) == (TH_CWR | TH_ECE)) &&
11102 ((V_tcp_do_ecn == 1) || (V_tcp_do_ecn == 2))) {
11103 tp->t_flags2 |= TF2_ECN_PERMIT;
11104 tp->t_flags2 |= TF2_ECN_SND_ECE;
11105 TCPSTAT_INC(tcps_ecn_shs);
11107 if ((to.to_flags & TOF_SCALE) &&
11108 (tp->t_flags & TF_REQ_SCALE)) {
11109 tp->t_flags |= TF_RCVD_SCALE;
11110 tp->snd_scale = to.to_wscale;
11112 tp->t_flags &= ~TF_REQ_SCALE;
11114 * Initial send window. It will be updated with the
11115 * next incoming segment to the scaled value.
11117 tp->snd_wnd = th->th_win;
11118 if ((to.to_flags & TOF_TS) &&
11119 (tp->t_flags & TF_REQ_TSTMP)) {
11120 tp->t_flags |= TF_RCVD_TSTMP;
11121 tp->ts_recent = to.to_tsval;
11122 tp->ts_recent_age = cts;
11124 tp->t_flags &= ~TF_REQ_TSTMP;
11125 if (to.to_flags & TOF_MSS)
11126 tcp_mss(tp, to.to_mss);
11127 if ((tp->t_flags & TF_SACK_PERMIT) &&
11128 (to.to_flags & TOF_SACKPERM) == 0)
11129 tp->t_flags &= ~TF_SACK_PERMIT;
11130 if (IS_FASTOPEN(tp->t_flags)) {
11131 if (to.to_flags & TOF_FASTOPEN) {
11134 if (to.to_flags & TOF_MSS)
11137 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
11141 tcp_fastopen_update_cache(tp, mss,
11142 to.to_tfo_len, to.to_tfo_cookie);
11144 tcp_fastopen_disable_path(tp);
11148 * At this point we are at the initial call. Here we decide
11149 * if we are doing RACK or not. We do this by seeing if
11150 * TF_SACK_PERMIT is set and the sack-not-required is clear.
11151 * The code now does do dup-ack counting so if you don't
11152 * switch back you won't get rack & TLP, but you will still
11156 if ((rack_sack_not_required == 0) &&
11157 ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
11158 tcp_switch_back_to_default(tp);
11159 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen,
11164 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
11165 tcp_set_hpts(tp->t_inpcb);
11166 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack);
11168 if (thflags & TH_FIN)
11169 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN);
11170 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
11171 if ((rack->rc_gp_dyn_mul) &&
11172 (rack->use_fixed_rate == 0) &&
11173 (rack->rc_always_pace)) {
11174 /* Check in on probertt */
11175 rack_check_probe_rtt(rack, us_cts);
11177 if (rack->forced_ack) {
11181 * A persist or keep-alive was forced out, update our
11182 * min rtt time. Note we do not worry about lost
11183 * retransmissions since KEEP-ALIVES and persists
11184 * are usually sent far apart in time (though
11185 * if we were really paranoid or worried we could
11186 * at least use timestamps if available to validate).
11188 rack->forced_ack = 0;
11189 us_rtt = us_cts - rack->r_ctl.forced_ack_ts;
11192 rack_log_rtt_upd(tp, rack, us_rtt, 0, NULL, 3);
11193 rack_apply_updated_usrtt(rack, us_rtt, us_cts);
11196 * This is the one exception case where we set the rack state
11197 * always. All other times (timers etc) we must have a rack-state
11198 * set (so we assure we have done the checks above for SACK).
11200 rack->r_ctl.rc_rcvtime = cts;
11201 if (rack->r_state != tp->t_state)
11202 rack_set_state(tp, rack);
11203 if (SEQ_GT(th->th_ack, tp->snd_una) &&
11204 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL)
11205 kern_prefetch(rsm, &prev_state);
11206 prev_state = rack->r_state;
11207 rack_clear_rate_sample(rack);
11208 retval = (*rack->r_substate) (m, th, so,
11209 tp, &to, drop_hdrlen,
11210 tlen, tiwin, thflags, nxt_pkt, iptos);
11212 if ((retval == 0) &&
11213 (tp->t_inpcb == NULL)) {
11214 panic("retval:%d tp:%p t_inpcb:NULL state:%d",
11215 retval, tp, prev_state);
11220 * If retval is 1 the tcb is unlocked and most likely the tp
11223 INP_WLOCK_ASSERT(tp->t_inpcb);
11224 if ((rack->rc_gp_dyn_mul) &&
11225 (rack->rc_always_pace) &&
11226 (rack->use_fixed_rate == 0) &&
11227 rack->in_probe_rtt &&
11228 (rack->r_ctl.rc_time_probertt_starts == 0)) {
11230 * If we are going for target, lets recheck before
11233 rack_check_probe_rtt(rack, us_cts);
11235 if (rack->set_pacing_done_a_iw == 0) {
11236 /* How much has been acked? */
11237 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) {
11238 /* We have enough to set in the pacing segment size */
11239 rack->set_pacing_done_a_iw = 1;
11240 rack_set_pace_segments(tp, rack, __LINE__);
11243 tcp_rack_xmit_timer_commit(rack, tp);
11244 if (nxt_pkt == 0) {
11245 if (rack->r_wanted_output != 0) {
11248 (void)tp->t_fb->tfb_tcp_output(tp);
11250 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
11252 if ((nxt_pkt == 0) &&
11253 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
11254 (SEQ_GT(tp->snd_max, tp->snd_una) ||
11255 (tp->t_flags & TF_DELACK) ||
11256 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
11257 (tp->t_state <= TCPS_CLOSING)))) {
11258 /* We could not send (probably in the hpts but stopped the timer earlier)? */
11259 if ((tp->snd_max == tp->snd_una) &&
11260 ((tp->t_flags & TF_DELACK) == 0) &&
11261 (rack->rc_inp->inp_in_hpts) &&
11262 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
11263 /* keep alive not needed; we are still in hpts awaiting output */
11267 if (rack->rc_inp->inp_in_hpts) {
11268 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
11269 us_cts = tcp_get_usecs(NULL);
11270 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
11272 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
11275 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
11277 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT);
11279 if (late && (did_out == 0)) {
11281 * We are late in the sending
11282 * and we did not call the output
11283 * (this probably should not happen).
11285 goto do_output_now;
11287 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), 0, 0, 0);
11290 } else if (nxt_pkt == 0) {
11291 /* Do we have the correct timer running? */
11292 rack_timer_audit(tp, rack, &so->so_snd);
11296 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out);
11298 rack->r_wanted_output = 0;
11300 if (tp->t_inpcb == NULL) {
11301 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d",
11303 retval, tp, prev_state);
11311 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
11312 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos)
11316 /* First let's see if we have old packets */
11317 if (tp->t_in_pkt) {
11318 if (ctf_do_queued_segments(so, tp, 1)) {
11323 if (m->m_flags & M_TSTMP_LRO) {
11324 tv.tv_sec = m->m_pkthdr.rcv_tstmp / 1000000000;
11325 tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000)/1000;
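/*
 * Example of the conversion (illustrative): an LRO timestamp of
 * 1500000123456 ns yields tv_sec = 1500 and
 * tv_usec = 123456 / 1000 = 123.
 */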
11327 /* Should not happen; should we KASSERT instead? */
11328 tcp_get_usecs(&tv);
11330 if (rack_do_segment_nounlock(m, th, so, tp,
11331 drop_hdrlen, tlen, iptos, 0, &tv) == 0)
11332 INP_WUNLOCK(tp->t_inpcb);
11335 struct rack_sendmap *
11336 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused)
11338 struct rack_sendmap *rsm = NULL;
11340 uint32_t srtt = 0, thresh = 0, ts_low = 0;
11342 /* Return the next guy to be re-transmitted */
11343 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) {
11346 if (tp->t_flags & TF_SENTFIN) {
11347 /* retran the end FIN? */
11350 /* OK, let's look at this one */
11351 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
11352 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) {
11355 rsm = rack_find_lowest_rsm(rack);
11360 if (rsm->r_flags & RACK_ACKED) {
11363 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) &&
11364 (rsm->r_dupack < DUP_ACK_THRESHOLD)) {
11365 /* It's not yet ready */
11368 srtt = rack_grab_rtt(tp, rack);
11369 idx = rsm->r_rtr_cnt - 1;
11370 ts_low = rsm->r_tim_lastsent[idx];
11371 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
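/*
 * A sketch of the decision below (numbers are hypothetical): if
 * srtt is 40 time units and rack_calc_thresh_rack() returns a
 * thresh of, say, 45, then a segment last sent 50 units before
 * tsused has waited long enough, while one sent 20 units ago
 * has not.
 */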
11372 if ((tsused == ts_low) ||
11373 (TSTMP_LT(tsused, ts_low))) {
11374 /* No time since sending */
11377 if ((tsused - ts_low) < thresh) {
11378 /* It has not been long enough yet */
11381 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
11382 ((rsm->r_flags & RACK_SACK_PASSED) &&
11383 (rack->sack_attack_disable == 0))) {
11385 * We have passed the dup-ack threshold <or>
11386 * a SACK has indicated this is missing.
11387 * Note that if you are a declared attacker
11388 * it is only the dup-ack threshold that
11389 * will cause retransmits.
11391 /* log retransmit reason */
11392 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1);
11399 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
11400 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method,
11401 int line, struct rack_sendmap *rsm)
11403 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
11404 union tcp_log_stackspecific log;
11407 memset(&log, 0, sizeof(log));
11408 log.u_bbr.flex1 = slot;
11409 log.u_bbr.flex2 = len;
11410 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs;
11411 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs;
11412 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss;
11413 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca;
11414 log.u_bbr.use_lt_bw = rack->app_limited_needs_set;
11415 log.u_bbr.use_lt_bw <<= 1;
11416 log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
11417 log.u_bbr.use_lt_bw <<= 1;
11418 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
11419 log.u_bbr.use_lt_bw <<= 1;
11420 log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
11421 log.u_bbr.pkt_epoch = line;
11422 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec;
11423 log.u_bbr.bw_inuse = bw_est;
11424 log.u_bbr.delRate = bw;
11425 if (rack->r_ctl.gp_bw == 0)
11426 log.u_bbr.cur_del_rate = 0;
11428 log.u_bbr.cur_del_rate = rack_get_bw(rack);
11429 log.u_bbr.rttProp = len_time;
11430 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt;
11431 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit;
11432 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
11433 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) {
11434 /* We are in slow start */
11435 log.u_bbr.flex7 = 1;
11437 /* We are in congestion avoidance */
11438 log.u_bbr.flex7 = 0;
11440 log.u_bbr.flex8 = method;
11441 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
11442 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
11443 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec;
11444 log.u_bbr.cwnd_gain <<= 1;
11445 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
11446 log.u_bbr.cwnd_gain <<= 1;
11447 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
11448 TCP_LOG_EVENTP(rack->rc_tp, NULL,
11449 &rack->rc_inp->inp_socket->so_rcv,
11450 &rack->rc_inp->inp_socket->so_snd,
11451 BBR_LOG_HPTSI_CALC, 0,
11452 0, &log, false, &tv);
11457 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss)
11459 uint32_t new_tso, user_max;
11461 user_max = rack->rc_user_set_max_segs * mss;
11462 if (rack->rc_force_max_seg) {
11465 if (rack->use_fixed_rate &&
11466 ((rack->r_ctl.crte == NULL) ||
11467 (bw != rack->r_ctl.crte->rate))) {
11468 /* Use the user mss since we are not exactly matched */
11471 new_tso = tcp_get_pacing_burst_size(bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL);
11472 if (new_tso > user_max)
11473 new_tso = user_max;
11478 rack_log_hdwr_pacing(struct tcp_rack *rack, const struct ifnet *ifp,
11479 uint64_t rate, uint64_t hw_rate, int line,
11482 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
11483 union tcp_log_stackspecific log;
11486 memset(&log, 0, sizeof(log));
11487 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff);
11488 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff);
11489 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff);
11490 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff);
11491 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
11492 log.u_bbr.bw_inuse = rate;
11493 log.u_bbr.flex5 = line;
11494 log.u_bbr.flex6 = error;
11495 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs;
11496 log.u_bbr.flex8 = rack->use_fixed_rate;
11497 log.u_bbr.flex8 <<= 1;
11498 log.u_bbr.flex8 |= rack->rack_hdrw_pacing;
11499 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
11500 TCP_LOG_EVENTP(rack->rc_tp, NULL,
11501 &rack->rc_inp->inp_socket->so_rcv,
11502 &rack->rc_inp->inp_socket->so_snd,
11503 BBR_LOG_HDWR_PACE, 0,
11504 0, &log, false, &tv);
11509 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz)
11511 uint64_t lentim, fill_bw;
11513 /* Let's first see if we are full; if so, continue with the normal rate */
11514 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use)
11516 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd)
11518 if (rack->r_ctl.rc_last_us_rtt == 0)
11520 if (rack->rc_pace_fill_if_rttin_range &&
11521 (rack->r_ctl.rc_last_us_rtt >=
11522 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) {
11523 /* The rtt is huge (N * smallest); let's not fill */
11527 * First let's calculate the b/w based on the last us-rtt
11530 fill_bw = rack->r_ctl.cwnd_to_use;
11531 /* Take the rwnd if it's smaller */
11532 if (fill_bw > rack->rc_tp->snd_wnd)
11533 fill_bw = rack->rc_tp->snd_wnd;
11534 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC;
11535 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt;
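/*
 * Worked example (illustrative numbers): with cwnd_to_use = 50000
 * bytes, a snd_wnd larger than that, and a last us-rtt of 10000 us,
 * fill_bw = 50000 * 1000000 / 10000 = 5000000 bytes/sec (~40 Mbps).
 */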
11536 /* We are below the min b/w */
11537 if (fill_bw < RACK_MIN_BW)
11540 * OK, fill_bw holds our mythical b/w to fill the cwnd
11541 * in an rtt; what does that equate to time-wise?
11543 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC;
11545 if (lentim < slot) {
11546 rack_log_pacing_delay_calc(rack, len, slot, fill_bw,
11547 0, lentim, 12, __LINE__, NULL);
11548 return ((int32_t)lentim);
11554 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz)
11556 struct rack_sendmap *lrsm;
11560 if (rack->rc_always_pace == 0) {
11562 * We use the most optimistic possible cwnd/srtt for
11563 * sending calculations. This will make our
11564 * calculation anticipate getting more through
11565 * quicker than possible. But that's OK; we don't want
11566 * the peer to have a gap in data sending.
11568 uint32_t srtt, cwnd, tr_perms = 0;
11569 int32_t reduce = 0;
11573 * We keep no precise pacing with the old method
11574 * instead we use the pacer to mitigate bursts.
11576 rack->r_ctl.rc_agg_delayed = 0;
11579 rack->r_ctl.rc_agg_early = 0;
11580 if (rack->r_ctl.rc_rack_min_rtt)
11581 srtt = rack->r_ctl.rc_rack_min_rtt;
11583 srtt = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT));
11584 if (rack->r_ctl.rc_rack_largest_cwnd)
11585 cwnd = rack->r_ctl.rc_rack_largest_cwnd;
11587 cwnd = rack->r_ctl.cwnd_to_use;
11588 tr_perms = cwnd / srtt;
11589 if (tr_perms == 0) {
11590 tr_perms = ctf_fixed_maxseg(tp);
11593 * Calculate how long this will take to drain, if
11594 * the calculation comes out to zero, that's OK; we
11595 * will use send_a_lot to possibly spin around for
11596 * more increasing tot_len_this_send to the point
11597 * that it's going to require a pace, or we hit the
11598 * cwnd. Which in that case we are just waiting for
11601 slot = len / tr_perms;
11602 /* Now do we reduce the time so we don't run dry? */
11603 if (slot && rack_slot_reduction) {
11604 reduce = (slot / rack_slot_reduction);
11605 if (reduce < slot) {
11610 slot *= HPTS_USEC_IN_MSEC;
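/*
 * Worked example (illustrative numbers): cwnd = 64240 bytes and a
 * min rtt of 40 ms give tr_perms = 64240 / 40 = 1606 bytes per ms;
 * a 16060 byte send then yields slot = 16060 / 1606 = 10 ms, or
 * 10000 us after the conversion above (ignoring any slot
 * reduction).
 */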
11613 * We always consider ourselves app-limited with old-style
11614 * sends that are not retransmits. This could be the initial
11615 * measurement, but that's OK; it's all set up and specially
11616 * handled. If another send leaks out, then that too will
11617 * be marked app-limited.
11619 lrsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
11620 if (lrsm && ((lrsm->r_flags & RACK_APP_LIMITED) == 0)) {
11621 rack->r_ctl.rc_first_appl = lrsm;
11622 lrsm->r_flags |= RACK_APP_LIMITED;
11623 rack->r_ctl.rc_app_limited_cnt++;
11626 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL);
11628 uint64_t bw_est, res, lentim, rate_wanted;
11629 uint32_t orig_val, srtt, segs, oh;
11631 if ((rack->r_rr_config == 1) && rsm) {
11632 return (rack->r_ctl.rc_min_to * HPTS_USEC_IN_MSEC);
11634 if (rack->use_fixed_rate) {
11635 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack);
11636 } else if ((rack->r_ctl.init_rate == 0) &&
11637 #ifdef NETFLIX_PEAKRATE
11638 (rack->rc_tp->t_maxpeakrate == 0) &&
11640 (rack->r_ctl.gp_bw == 0)) {
11641 /* no way to yet do an estimate */
11642 bw_est = rate_wanted = 0;
11644 bw_est = rack_get_bw(rack);
11645 rate_wanted = rack_get_output_bw(rack, bw_est, rsm);
11647 if ((bw_est == 0) || (rate_wanted == 0)) {
11649 * No way yet to make a b/w estimate or
11650 * our rate is set incorrectly.
11654 /* We need to account for all the overheads */
11655 segs = (len + segsiz - 1) / segsiz;
11657 * We need the diff between 1514 bytes (e-mtu with e-hdr)
11658 * and how much data we put in each packet. Yes this
11659 * means we may be off if we are larger than 1500 bytes
11660 * or smaller. But this just makes us more conservative.
11662 if (ETHERNET_SEGMENT_SIZE > segsiz)
11663 oh = ETHERNET_SEGMENT_SIZE - segsiz;
11667 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC;
11668 res = lentim / rate_wanted;
11669 slot = (uint32_t)res;
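/*
 * Worked example (illustrative, and assuming the elided overhead
 * accounting scales 'segs' by 'oh'): len = 14480 bytes in ten
 * 1448-byte segments carries 10 * (1514 - 1448) = 660 bytes of
 * ethernet overhead, so at a rate_wanted of 1514000 bytes/sec,
 * res = (14480 + 660) * 1000000 / 1514000 = 10000 us.
 */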
11670 orig_val = rack->r_ctl.rc_pace_max_segs;
11671 rack_set_pace_segments(rack->rc_tp, rack, __LINE__);
11673 /* For TLS we need to override this, possibly */
11674 if (rack->rc_inp->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) {
11675 rack_set_pace_segments(rack->rc_tp, rack, __LINE__);
11678 /* Did we change the TSO size, if so log it */
11679 if (rack->r_ctl.rc_pace_max_segs != orig_val)
11680 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL);
11681 if ((rack->rc_pace_to_cwnd) &&
11682 (rack->in_probe_rtt == 0) &&
11683 (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) {
11685 * We want to pace at our rate *or* faster to
11686 * fill the cwnd to the max if its not full.
11688 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz);
11690 if ((rack->rc_inp->inp_route.ro_nh != NULL) &&
11691 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
11692 if ((rack->rack_hdw_pace_ena) &&
11693 (rack->rack_hdrw_pacing == 0) &&
11694 (rack->rack_attempt_hdwr_pace == 0)) {
11696 * Lets attempt to turn on hardware pacing
11699 rack->rack_attempt_hdwr_pace = 1;
11700 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp,
11701 rack->rc_inp->inp_route.ro_nh->nh_ifp,
11705 if (rack->r_ctl.crte) {
11706 rack->rack_hdrw_pacing = 1;
11707 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(rate_wanted, segsiz,
11708 0, rack->r_ctl.crte,
11710 rack_log_hdwr_pacing(rack, rack->rc_inp->inp_route.ro_nh->nh_ifp,
11711 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
11714 } else if (rack->rack_hdrw_pacing &&
11715 (rack->r_ctl.crte->rate != rate_wanted)) {
11716 /* Do we need to adjust our rate? */
11717 const struct tcp_hwrate_limit_table *nrte;
11719 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte,
11721 rack->rc_inp->inp_route.ro_nh->nh_ifp,
11725 if (nrte == NULL) {
11726 /* Lost the rate */
11727 rack->rack_hdrw_pacing = 0;
11728 rack_set_pace_segments(rack->rc_tp, rack, __LINE__);
11729 } else if (nrte != rack->r_ctl.crte) {
11730 rack->r_ctl.crte = nrte;
11731 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(rate_wanted,
11735 rack_log_hdwr_pacing(rack, rack->rc_inp->inp_route.ro_nh->nh_ifp,
11736 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
11742 if (rack_limit_time_with_srtt &&
11743 (rack->use_fixed_rate == 0) &&
11744 #ifdef NETFLIX_PEAKRATE
11745 (rack->rc_tp->t_maxpeakrate == 0) &&
11747 (rack->rack_hdrw_pacing == 0)) {
11749 * Sanity check, we do not allow the pacing delay
11750 * to be longer than the SRTT of the path. If it is
11751 * a slow path, then adding a packet should increase
11752 * the RTT and compensate for this i.e. the srtt will
11753 * be greater so the allowed pacing time will be greater.
11755 * Note this restriction does not apply when a peak rate
11756 * is set, or when we are doing fixed or hardware pacing.
11758 if (rack->rc_tp->t_srtt)
11759 srtt = (TICKS_2_USEC(rack->rc_tp->t_srtt) >> TCP_RTT_SHIFT);
11761 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* it's in ms; convert */
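/*
 * For instance (hypothetical numbers): a computed slot of
 * 200000 us against an srtt of 60000 us would be clamped to
 * 60000 us by the check that follows.
 */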
11763 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL);
11767 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm);
11770 counter_u64_add(rack_calc_nonzero, 1);
11772 counter_u64_add(rack_calc_zero, 1);
11777 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack,
11778 tcp_seq startseq, uint32_t sb_offset)
11780 struct rack_sendmap *my_rsm = NULL;
11781 struct rack_sendmap fe;
11783 if (tp->t_state < TCPS_ESTABLISHED) {
11785 * We don't start any measurements if we are
11786 * not at least established.
11790 tp->t_flags |= TF_GPUTINPROG;
11791 rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
11792 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
11793 tp->gput_seq = startseq;
11794 rack->app_limited_needs_set = 0;
11795 if (rack->in_probe_rtt)
11796 rack->measure_saw_probe_rtt = 1;
11797 else if ((rack->measure_saw_probe_rtt) &&
11798 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
11799 rack->measure_saw_probe_rtt = 0;
11800 if (rack->rc_gp_filled)
11801 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
11803 /* Special case initial measurement */
11804 rack->r_ctl.rc_gp_output_ts = tp->gput_ts = tcp_get_usecs(NULL);
11807 * We take a guess out into the future,
11808 * if we have no measurement and no
11809 * initial rate, we measure the first
11810 * initial-window's worth of data to
11811 * speed up getting some GP measurement and
11812 * thus start pacing.
11814 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) {
11815 rack->app_limited_needs_set = 1;
11816 tp->gput_ack = startseq + max(rc_init_window(rack),
11817 (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
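/*
 * For example (illustrative; assuming MIN_GP_WIN works out to a
 * few segments): with rc_init_window() = 14600 bytes and a
 * 1460-byte maxseg, gput_ack = startseq + max(14600,
 * MIN_GP_WIN * 1460), i.e. roughly an initial window ahead of
 * startseq.
 */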
11818 rack_log_pacing_delay_calc(rack,
11823 rack->r_ctl.rc_app_limited_cnt,
11830 * We are out somewhere in the sb;
11831 * can we use the already-outstanding data?
11834 if (rack->r_ctl.rc_app_limited_cnt == 0) {
11836 * Yes first one is good and in this case
11837 * the tp->gput_ts is correctly set based on
11838 * the last ack that arrived (no need to
11839 * set things up when an ack comes in).
11841 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
11842 if ((my_rsm == NULL) ||
11843 (my_rsm->r_rtr_cnt != 1)) {
11844 /* retransmission? */
11848 if (rack->r_ctl.rc_first_appl == NULL) {
11850 * If rc_first_appl is NULL
11851 * then the cnt should be 0.
11852 * This is probably an error, maybe
11853 * a KASSERT would be appropriate.
11858 * If we have a marker pointer to the last one that is
11859 * app limited we can use that, but we need to set
11860 * things up so that when it gets ack'ed we record
11861 * the ack time (if its not already acked).
11863 rack->app_limited_needs_set = 1;
11865 * We want to get to the rsm that is either
11866 * next with space i.e. over 1 MSS or the one
11867 * after that (after the app-limited).
11869 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
11870 rack->r_ctl.rc_first_appl);
11872 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp))
11873 /* Have to use the next one */
11874 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
11877 /* Use after the first MSS of it is acked */
11878 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp);
11882 if ((my_rsm == NULL) ||
11883 (my_rsm->r_rtr_cnt != 1)) {
11885 * Either its a retransmit or
11886 * the last is the app-limited one.
11891 tp->gput_seq = my_rsm->r_start;
11893 if (my_rsm->r_flags & RACK_ACKED) {
11895 * This one has been acked; use the arrival ack time
11897 tp->gput_ts = my_rsm->r_ack_arrival;
11898 rack->app_limited_needs_set = 0;
11900 rack->r_ctl.rc_gp_output_ts = my_rsm->usec_orig_send;
11901 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
11902 rack_log_pacing_delay_calc(rack,
11907 rack->r_ctl.rc_app_limited_cnt,
11915 * We don't know how long we may have been
11916 * idle or if this is the first send. Let's
11917 * set up the flag so we will trim off
11918 * the first ack'd data so we get a true
11921 rack->app_limited_needs_set = 1;
11922 tp->gput_ack = startseq + rack_get_measure_window(tp, rack);
11923 /* Find this guy so we can pull the send time */
11924 fe.r_start = startseq;
11925 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
11927 rack->r_ctl.rc_gp_output_ts = my_rsm->usec_orig_send;
11928 if (my_rsm->r_flags & RACK_ACKED) {
11930 * Unlikely, since it's probably what was
11931 * just transmitted (but I am paranoid).
11933 tp->gput_ts = my_rsm->r_ack_arrival;
11934 rack->app_limited_needs_set = 0;
11936 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) {
11937 /* This also is unlikely */
11938 tp->gput_seq = my_rsm->r_start;
11942 * TSNH unless we have some send-map limit,
11943 * and even at that it should not be hitting
11944 * that limit (we should have stopped sending).
11946 rack->r_ctl.rc_gp_output_ts = tcp_get_usecs(NULL);
11948 rack_log_pacing_delay_calc(rack,
11953 rack->r_ctl.rc_app_limited_cnt,
11954 9, __LINE__, NULL);
11957 static inline uint32_t
11958 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use,
11959 uint32_t avail, int32_t sb_offset)
11964 if (tp->snd_wnd > cwnd_to_use)
11965 sendwin = cwnd_to_use;
11967 sendwin = tp->snd_wnd;
11968 if (ctf_outstanding(tp) >= tp->snd_wnd) {
11969 /* We never want to go over our peer's rcv-window */
11974 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
11975 if (flight >= sendwin) {
11977 * We have in flight what we are allowed by cwnd (if
11978 * it was rwnd blocking it would have hit above out
11983 len = sendwin - flight;
11984 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) {
11985 /* We would send too much (beyond the rwnd) */
11986 len = tp->snd_wnd - ctf_outstanding(tp);
11988 if ((len + sb_offset) > avail) {
11990 * We don't have that much in the SB, how much is
11993 len = avail - sb_offset;
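/*
 * Putting the clamps together (illustrative numbers): snd_wnd =
 * 65535 and cwnd_to_use = 20000 give sendwin = 20000; with 8000
 * bytes in flight, len = 12000; if only avail = 9000 with
 * sb_offset = 4000, the socket-buffer clamp leaves len = 5000.
 */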
12000 rack_output(struct tcpcb *tp)
12004 uint32_t sb_offset;
12005 int32_t len, flags, error = 0;
12008 uint32_t if_hw_tsomaxsegcount = 0;
12009 uint32_t if_hw_tsomaxsegsize;
12010 int32_t segsiz, minseg;
12011 long tot_len_this_send = 0;
12012 struct ip *ip = NULL;
12014 struct ipovly *ipov = NULL;
12016 struct udphdr *udp = NULL;
12017 struct tcp_rack *rack;
12021 uint8_t wanted_cookie = 0;
12022 u_char opt[TCP_MAXOLEN];
12023 unsigned ipoptlen, optlen, hdrlen, ulen=0;
12026 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
12027 unsigned ipsec_optlen = 0;
12030 int32_t idle, sendalot;
12031 int32_t sub_from_prr = 0;
12032 volatile int32_t sack_rxmit;
12033 struct rack_sendmap *rsm = NULL;
12037 int32_t sup_rack = 0;
12038 uint32_t cts, us_cts, delayed, early;
12039 uint8_t hpts_calling, new_data_tlp = 0, doing_tlp = 0;
12040 uint32_t cwnd_to_use;
12041 int32_t do_a_prefetch;
12042 int32_t prefetch_rsm = 0;
12046 int32_t prefetch_so_done = 0;
12047 struct tcp_log_buffer *lgb = NULL;
12049 struct sockbuf *sb;
12051 struct ip6_hdr *ip6 = NULL;
12054 uint8_t filled_all = 0;
12055 bool hw_tls = false;
12057 /* set up and take the cache hits here */
12058 rack = (struct tcp_rack *)tp->t_fb_ptr;
12059 inp = rack->rc_inp;
12060 so = inp->inp_socket;
12062 kern_prefetch(sb, &do_a_prefetch);
12064 hpts_calling = inp->inp_hpts_calls;
12066 hw_tls = (so->so_snd.sb_flags & SB_TLS_IFNET) != 0;
12069 NET_EPOCH_ASSERT();
12070 INP_WLOCK_ASSERT(inp);
12072 if (tp->t_flags & TF_TOE)
12073 return (tcp_offload_output(tp));
12076 * For TFO connections in SYN_RECEIVED, only allow the initial
12077 * SYN|ACK and those sent by the retransmit timer.
12079 if (IS_FASTOPEN(tp->t_flags) &&
12080 (tp->t_state == TCPS_SYN_RECEIVED) &&
12081 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */
12082 (rack->r_ctl.rc_resend == NULL)) /* not a retransmit */
12085 if (rack->r_state) {
12086 /* Use the cache line loaded if possible */
12087 isipv6 = rack->r_is_v6;
12089 isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
12093 us_cts = tcp_get_usecs(&tv);
12094 cts = tcp_tv_to_mssectick(&tv);
12095 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
12096 inp->inp_in_hpts) {
12098 * We are on the hpts for some timer but not hptsi output.
12099 * Remove from the hpts unconditionally.
12101 rack_timer_cancel(tp, rack, cts, __LINE__);
12103 /* Are we pacing and late? */
12104 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
12105 TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) {
12106 /* We are delayed */
12107 delayed = us_cts - rack->r_ctl.rc_last_output_to;
12111 /* Do the timers, which may override the pacer */
12112 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
12113 if (rack_process_timers(tp, rack, cts, hpts_calling)) {
12114 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1);
12118 if ((rack->r_timer_override) ||
12120 (tp->t_state < TCPS_ESTABLISHED)) {
12121 if (tp->t_inpcb->inp_in_hpts)
12122 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT);
12123 } else if (tp->t_inpcb->inp_in_hpts) {
12125 * On the hpts you can't pass even if ACKNOW is on; we will
12126 * send when the hpts fires.
12128 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1);
12131 inp->inp_hpts_calls = 0;
12132 /* Finish out both pacing early and late accounting */
12133 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
12134 TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
12135 early = rack->r_ctl.rc_last_output_to - us_cts;
12139 rack->r_ctl.rc_agg_delayed += delayed;
12141 } else if (early) {
12142 rack->r_ctl.rc_agg_early += early;
12145 /* Now that early/late accounting is done turn off the flag */
12146 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
12147 rack->r_wanted_output = 0;
12148 rack->r_timer_override = 0;
12150 * For TFO connections in SYN_SENT or SYN_RECEIVED,
12151 * only allow the initial SYN or SYN|ACK and those sent
12152 * by the retransmit timer.
12154 if (IS_FASTOPEN(tp->t_flags) &&
12155 ((tp->t_state == TCPS_SYN_RECEIVED) ||
12156 (tp->t_state == TCPS_SYN_SENT)) &&
12157 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */
12158 (tp->t_rxtshift == 0)) { /* not a retransmit */
12159 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
12160 goto just_return_nolock;
12163 * Determine length of data that should be transmitted, and flags
12164 * that will be used. If there is some data or critical controls
12165 * (SYN, RST) to send, then transmit; otherwise, investigate
12168 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
12169 if (tp->t_idle_reduce) {
12170 if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur))
12171 rack_cc_after_idle(rack, tp);
12173 tp->t_flags &= ~TF_LASTIDLE;
12175 if (tp->t_flags & TF_MORETOCOME) {
12176 tp->t_flags |= TF_LASTIDLE;
12180 if ((tp->snd_una == tp->snd_max) &&
12181 rack->r_ctl.rc_went_idle_time &&
12182 TSTMP_GT(us_cts, rack->r_ctl.rc_went_idle_time)) {
12183 idle = us_cts - rack->r_ctl.rc_went_idle_time;
12184 if (idle > rack_min_probertt_hold) {
12185 /* Count as a probe rtt */
12186 if (rack->in_probe_rtt == 0) {
12187 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
12188 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
12189 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
12190 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
12192 rack_exit_probertt(rack, us_cts);
12199 * If we've recently taken a timeout, snd_max will be greater than
12200 * snd_nxt. There may be SACK information that allows us to avoid
12201 * resending already delivered data. Adjust snd_nxt accordingly.
12204 us_cts = tcp_get_usecs(&tv);
12205 cts = tcp_tv_to_mssectick(&tv);
12208 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
12209 if (so->so_snd.sb_flags & SB_TLS_IFNET) {
12210 minseg = rack->r_ctl.rc_pace_min_segs;
12214 sb_offset = tp->snd_max - tp->snd_una;
12215 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
12216 #ifdef NETFLIX_SHARED_CWND
12217 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) &&
12218 rack->rack_enable_scwnd) {
12219 /* We are doing cwnd sharing */
12220 if (rack->rc_gp_filled &&
12221 (rack->rack_attempted_scwnd == 0) &&
12222 (rack->r_ctl.rc_scw == NULL) &&
12224 /* The pcbid is in, let's make an attempt */
12225 counter_u64_add(rack_try_scwnd, 1);
12226 rack->rack_attempted_scwnd = 1;
12227 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp,
12228 &rack->r_ctl.rc_scw_index,
12231 if (rack->r_ctl.rc_scw &&
12232 (rack->rack_scwnd_is_idle == 1) &&
12233 (rack->rc_in_persist == 0) &&
12235 /* we are no longer out of data */
12236 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
12237 rack->rack_scwnd_is_idle = 0;
12239 if (rack->r_ctl.rc_scw) {
12240 /* First let's update and get the cwnd */
12241 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw,
12242 rack->r_ctl.rc_scw_index,
12243 tp->snd_cwnd, tp->snd_wnd, segsiz);
12247 flags = tcp_outflags[tp->t_state];
12248 while (rack->rc_free_cnt < rack_free_cache) {
12249 rsm = rack_alloc(rack);
12251 if (inp->inp_hpts_calls)
12252 /* Retry in a ms */
12253 slot = (1 * HPTS_USEC_IN_MSEC);
12254 goto just_return_nolock;
12256 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext);
12257 rack->rc_free_cnt++;
12260 if (inp->inp_hpts_calls)
12261 inp->inp_hpts_calls = 0;
12265 if (flags & TH_RST) {
12269 if (rack->r_ctl.rc_resend) {
12270 /* Retransmit timer */
12271 rsm = rack->r_ctl.rc_resend;
12272 rack->r_ctl.rc_resend = NULL;
12273 rsm->r_flags &= ~RACK_TLP;
12274 len = rsm->r_end - rsm->r_start;
12277 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
12278 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
12279 __func__, __LINE__,
12280 rsm->r_start, tp->snd_una, tp, rack, rsm));
12281 sb_offset = rsm->r_start - tp->snd_una;
12284 } else if ((rack->rc_in_persist == 0) &&
12285 ((rsm = tcp_rack_output(tp, rack, cts)) != NULL)) {
12286 /* We have a retransmit that takes precedence */
12287 rsm->r_flags &= ~RACK_TLP;
12288 if ((!IN_RECOVERY(tp->t_flags)) &&
12289 ((tp->t_flags & (TF_WASFRECOVERY | TF_WASCRECOVERY)) == 0)) {
12290 /* Enter recovery if not induced by a time-out */
12291 rack->r_ctl.rc_rsm_start = rsm->r_start;
12292 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
12293 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
12294 rack_cong_signal(tp, NULL, CC_NDUPACK);
12296 * When we enter recovery we need to assure we send
12299 if (rack->rack_no_prr == 0) {
12300 rack->r_ctl.rc_prr_sndcnt = segsiz;
12301 rack_log_to_prr(rack, 13, 0);
12305 if (SEQ_LT(rsm->r_start, tp->snd_una)) {
12306 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
12307 tp, rack, rsm, rsm->r_start, tp->snd_una);
12310 len = rsm->r_end - rsm->r_start;
12311 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
12312 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
12313 __func__, __LINE__,
12314 rsm->r_start, tp->snd_una, tp, rack, rsm));
12315 sb_offset = rsm->r_start - tp->snd_una;
12316 /* Can we send it within the PRR boundary? */
12317 if (rack->rack_no_prr == 0) {
12318 if ((rack->use_rack_rr == 0) && (len > rack->r_ctl.rc_prr_sndcnt)) {
12319 /* It does not fit */
12320 if ((ctf_flight_size(tp, rack->r_ctl.rc_sacked) > len) &&
12321 (rack->r_ctl.rc_prr_sndcnt < segsiz)) {
12323 * prr is less than a segment; we
12324 * have more acks due in besides
12325 * what we need to resend. Let's not send,
12326 * to avoid sending small pieces of
12327 * what we need to retransmit.
12330 goto just_return_nolock;
12332 len = rack->r_ctl.rc_prr_sndcnt;
12341 KMOD_TCPSTAT_INC(tcps_sack_rexmits);
12342 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
12344 counter_u64_add(rack_rtm_prr_retran, 1);
12346 } else if (rack->r_ctl.rc_tlpsend) {
12347 /* Tail loss probe */
12353 * Check if we can do a TLP with a RACK'd packet;
12354 * this can happen if we are not doing the rack
12355 * cheat and we skipped to a TLP and it
12358 rsm = rack->r_ctl.rc_tlpsend;
12359 rsm->r_flags |= RACK_TLP;
12360 rack->r_ctl.rc_tlpsend = NULL;
12362 tlen = rsm->r_end - rsm->r_start;
12365 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
12366 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
12367 __func__, __LINE__,
12368 rsm->r_start, tp->snd_una, tp, rack, rsm));
12369 sb_offset = rsm->r_start - tp->snd_una;
12370 cwin = min(tp->snd_wnd, tlen);
12374 * Enforce a connection sendmap count limit if set
12375 * as long as we are not retransmitting.
12377 if ((rsm == NULL) &&
12378 (rack->do_detection == 0) &&
12379 (V_tcp_map_entries_limit > 0) &&
12380 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
12381 counter_u64_add(rack_to_alloc_limited, 1);
12382 if (!rack->alloc_limit_reported) {
12383 rack->alloc_limit_reported = 1;
12384 counter_u64_add(rack_alloc_limited_conns, 1);
12386 goto just_return_nolock;
12388 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) {
12389 /* we are retransmitting the fin */
12393 * When retransmitting data do *not* include the
12394 * FIN. This could happen from a TLP probe.
12400 /* For debugging */
12401 rack->r_ctl.rc_rsm_at_retran = rsm;
12404 * Get standard flags, and add SYN or FIN if requested by 'hidden'
12407 if (tp->t_flags & TF_NEEDFIN)
12409 if (tp->t_flags & TF_NEEDSYN)
12411 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) {
12413 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
12415 kern_prefetch(end_rsm, &prefetch_rsm);
12420 * If snd_nxt == snd_max and we have transmitted a FIN, the
12421 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a
12422 * negative length. This can also occur when TCP opens up its
12423 * congestion window while receiving additional duplicate acks after
12424 * fast-retransmit because TCP will reset snd_nxt to snd_max after
12425 * the fast-retransmit.
12427 * In the normal retransmit-FIN-only case, however, snd_nxt will be
12428 * set to snd_una, the sb_offset will be 0, and the length may wind
12431 * If sack_rxmit is true we are retransmitting from the scoreboard
12432 * in which case len is already set.
12434 if ((sack_rxmit == 0) && TCPS_HAVEESTABLISHED(tp->t_state)) {
12437 avail = sbavail(sb);
12438 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail)
12439 sb_offset = tp->snd_nxt - tp->snd_una;
12442 if ((IN_RECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) {
12443 if (rack->r_ctl.rc_tlp_new_data) {
12444 /* TLP is forcing out new data */
12445 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) {
12446 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset);
12448 if (rack->r_ctl.rc_tlp_new_data > tp->snd_wnd)
12451 len = rack->r_ctl.rc_tlp_new_data;
12452 rack->r_ctl.rc_tlp_new_data = 0;
12453 new_data_tlp = doing_tlp = 1;
12455 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset);
12456 if (IN_RECOVERY(tp->t_flags) && (len > segsiz)) {
12458 * For prr=off, we need to send only 1 MSS
12459 * at a time. We do this because another sack could
12460 * be arriving that causes us to send retransmits and
12461 * we don't want to be on a long pace due to a larger send
12462 * that keeps us from sending out the retransmit.
12467 uint32_t outstanding;
12470 * We are inside of a SACK recovery episode and are
12471 * sending new data, having retransmitted all the
12472 * data possible so far in the scoreboard.
12474 outstanding = tp->snd_max - tp->snd_una;
12475 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) {
12476 if (tp->snd_wnd > outstanding) {
12477 len = tp->snd_wnd - outstanding;
12478 /* Check to see if we have the data */
12479 if ((sb_offset + len) > avail) {
12480 /* It does not all fit */
12481 if (avail > sb_offset)
12482 len = avail - sb_offset;
12488 } else if (avail > sb_offset)
12489 len = avail - sb_offset;
12493 if (len > rack->r_ctl.rc_prr_sndcnt)
12494 len = rack->r_ctl.rc_prr_sndcnt;
12497 counter_u64_add(rack_rtm_prr_newdata, 1);
12500 if (len > segsiz) {
12502 * We should never send more than a MSS when
12503 * retransmitting or sending new data in prr
12504 * mode unless the override flag is on. Most
12505 * likely the PRR algorithm is not going to
12506 * let us send a lot as well :-)
12508 if (rack->r_ctl.rc_prr_sendalot == 0)
12510 } else if (len < segsiz) {
12512 * Do we send any? The idea here is that if the
12513 * send empties the socket buffer we want to
12514 * do it. However, if not, then let's just wait
12515 * for our prr_sndcnt to get bigger.
12519 leftinsb = sbavail(sb) - sb_offset;
12520 if (leftinsb > len) {
12521 /* This send does not empty the sb */
12526 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) {
12528 * If you have not established
12529 * and are not doing FAST OPEN
12532 if ((sack_rxmit == 0) &&
12533 (!IS_FASTOPEN(tp->t_flags))){
12538 if (prefetch_so_done == 0) {
12539 kern_prefetch(so, &prefetch_so_done);
12540 prefetch_so_done = 1;
12543 * Lop off SYN bit if it has already been sent. However, if this is
12544 * SYN-SENT state and if segment contains data and if we don't know
12545 * that foreign host supports TAO, suppress sending segment.
12547 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) &&
12548 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) {
12550 * When sending additional segments following a TFO SYN|ACK,
12551 * do not include the SYN bit.
12553 if (IS_FASTOPEN(tp->t_flags) &&
12554 (tp->t_state == TCPS_SYN_RECEIVED))
12558 * Be careful not to send data and/or FIN on SYN segments. This
12559 * measure is needed to prevent interoperability problems with not
12560 * fully conformant TCP implementations.
12562 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
12567 * On TFO sockets, ensure no data is sent in the following cases:
12569 * - When retransmitting SYN|ACK on a passively-created socket
12571 * - When retransmitting SYN on an actively created socket
12573 * - When sending a zero-length cookie (cookie request) on an
12574 * actively created socket
12576 * - When the socket is in the CLOSED state (RST is being sent)
12578 if (IS_FASTOPEN(tp->t_flags) &&
12579 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
12580 ((tp->t_state == TCPS_SYN_SENT) &&
12581 (tp->t_tfo_client_cookie_len == 0)) ||
12582 (flags & TH_RST))) {
12586 /* Without fast-open there should never be data sent on a SYN */
12587 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) {
12588 tp->snd_nxt = tp->iss;
12594 * If FIN has been sent but not acked, but we haven't been
12595 * called to retransmit, len will be < 0. Otherwise, window
12596 * shrank after we sent into it. If window shrank to 0,
12597 * cancel pending retransmit, pull snd_nxt back to (closed)
12598 * window, and set the persist timer if it isn't already
12599 * going. If the window didn't close completely, just wait
12602 * We also do a general check here to ensure that we will
12603 * set the persist timer when we have data to send, but a
12604 * 0-byte window. This makes sure the persist timer is set
12605 * even if the packet hits one of the "goto send" lines
12609 if ((tp->snd_wnd == 0) &&
12610 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
12611 (tp->snd_una == tp->snd_max) &&
12612 (sb_offset < (int)sbavail(sb))) {
12613 tp->snd_nxt = tp->snd_una;
12614 rack_enter_persist(tp, rack, cts);
12616 } else if ((rsm == NULL) &&
12617 ((doing_tlp == 0) || (new_data_tlp == 1)) &&
12618 (len < rack->r_ctl.rc_pace_max_segs)) {
12620 * We are not sending a maximum sized segment for
12621 * some reason. Should we not send anything (think
12622 * sws or persists)?
12624 if ((tp->snd_wnd < min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), minseg)) &&
12625 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
12627 (len < (int)(sbavail(sb) - sb_offset))) {
12629 * Here the rwnd is less than
12630 * the minimum pacing size, this is not a retransmit,
12631 * we are established, and
12632 * the send is not the last in the socket buffer,
12633 * so we send nothing, and we may enter persists
12634 * if nothing is outstanding.
12637 if (tp->snd_max == tp->snd_una) {
12639 * Nothing is out; we can
12640 * go into persists.
12642 rack_enter_persist(tp, rack, cts);
12643 tp->snd_nxt = tp->snd_una;
12645 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) &&
12646 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
12647 (len < (int)(sbavail(sb) - sb_offset)) &&
12650 * Here we are not retransmitting, and
12651 * the cwnd is not so small that we could
12652 * not send at least a min size (rxt timer
12653 * not having gone off). We have 2 segments or
12654 * more already in flight, it's not the tail end
12655 * of the socket buffer, and the cwnd is blocking
12656 * us from sending out a minimum pacing segment size.
12657 * Let's not send anything.
12660 } else if (((tp->snd_wnd - ctf_outstanding(tp)) <
12661 min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
12662 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
12663 (len < (int)(sbavail(sb) - sb_offset)) &&
12664 (TCPS_HAVEESTABLISHED(tp->t_state))) {
12666 * Here we have a send window but we have
12667 * filled it up and we can't send another pacing segment.
12668 * We also have in flight more than 2 segments
12669 * and we are not completing the sb i.e. we allow
12670 * the last bytes of the sb to go out even if
12671 * it's not a full pacing segment.
12676 /* len will be >= 0 after this point. */
12677 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
12678 tcp_sndbuf_autoscale(tp, so, min(tp->snd_wnd, cwnd_to_use));
12680 * Decide if we can use TCP Segmentation Offloading (if supported by
12683 * TSO may only be used if we are in a pure bulk sending state. The
12684 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP
12685 * options prevent using TSO. With TSO the TCP header is the same
12686 * (except for the sequence number) for all generated packets. This
12687 * makes it impossible to transmit any options which vary per
12688 * generated segment or packet.
12690 * IPv4 handling has a clear separation of ip options and ip header
12691 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
12692 * the right thing below to provide length of just ip options and thus
12693 * checking for ipoptlen is enough to decide if ip options are present.
12698 ipoptlen = ip6_optlen(tp->t_inpcb);
12701 if (tp->t_inpcb->inp_options)
12702 ipoptlen = tp->t_inpcb->inp_options->m_len -
12703 offsetof(struct ipoption, ipopt_list);
12706 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
12708 * Pre-calculate here as we save another lookup into the darknesses
12709 * of IPsec that way and can actually decide if TSO is ok.
12712 if (isipv6 && IPSEC_ENABLED(ipv6))
12713 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb);
12719 if (IPSEC_ENABLED(ipv4))
12720 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb);
12724 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
12725 ipoptlen += ipsec_optlen;
12727 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz &&
12728 (tp->t_port == 0) &&
12729 ((tp->t_flags & TF_SIGNATURE) == 0) &&
12730 tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
12734 uint32_t outstanding;
12736 outstanding = tp->snd_max - tp->snd_una;
12737 if (tp->t_flags & TF_SENTFIN) {
12739 * If we sent a fin, snd_max is 1 higher than
12745 if ((rsm->r_flags & RACK_HAS_FIN) == 0)
12748 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
12753 recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
12754 (long)TCP_MAXWIN << tp->rcv_scale);
12757 * Sender silly window avoidance. We transmit under the following
12758 * conditions when len is non-zero:
12760 * - We have a full segment (or more with TSO) - This is the last
12761 * buffer in a write()/send() and we are either idle or running
12762 * NODELAY - we've timed out (e.g. persist timer) - we have more
12763 * than 1/2 the maximum send window's worth of data (receiver may be
12764 * limiting the window size) - we need to retransmit
12767 if (len >= segsiz) {
12771 * NOTE! on localhost connections an 'ack' from the remote
12772 * end may occur synchronously with the output and cause us
12773 * to flush a buffer queued with moretocome. XXX
12776 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */
12777 (idle || (tp->t_flags & TF_NODELAY)) &&
12778 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
12779 (tp->t_flags & TF_NOPUSH) == 0) {
12783 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */
12787 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
12791 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */
12799 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) &&
12800 (ctf_outstanding(tp) < (segsiz * 2))) {
12802 * We have less than two MSS outstanding (delayed ack)
12803 * and our rwnd will not let us send a full-sized
12804 * MSS. Let's go ahead and let this small segment
12805 * out because we want to try to have at least two
12806 * packets inflight to not be caught by delayed ack.
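 *
 * Example (hypothetical numbers): segsiz = 1448 with 1448 bytes
 * outstanding (under 2 * 1448 = 2896) and only 1000 bytes of rwnd
 * headroom; we send the sub-MSS segment so two packets are in
 * flight rather than waiting on the peer's delayed-ack timer.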
12813 * Sending of standalone window updates.
12815 * Window updates are important when we close our window due to a
12816 * full socket buffer and are opening it again after the application
12817 * reads data from it. Once the window has opened again and the
12818 * remote end starts to send again the ACK clock takes over and
12819 * provides the most current window information.
12821 * We must avoid the silly window syndrome whereby every read from
12822 * the receive buffer, no matter how small, causes a window update
12823 * to be sent. We also should avoid sending a flurry of window
12824 * updates when the socket buffer had queued a lot of data and the
12825 * application is doing small reads.
12827 * Prevent a flurry of pointless window updates by only sending an
12828 * update when we can increase the advertized window by more than
12829 * 1/4th of the socket buffer capacity. When the buffer is getting
12830 * full or is very small be more aggressive and send an update
12831 * whenever we can increase by two mss sized segments. In all other
12832 * situations the ACK's to new incoming data will carry further
12833 * window increases.
12835 * Don't send an independent window update if a delayed ACK is
12836 * pending (it will get piggy-backed on it) or the remote side
12837 * already has done a half-close and won't send more data. Skip
12838 * this if the connection is in T/TCP half-open state.
12840 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
12841 !(tp->t_flags & TF_DELACK) &&
12842 !TCPS_HAVERCVDFIN(tp->t_state)) {
12844 * "adv" is the amount we could increase the window, taking
12845 * into account that we are limited by TCP_MAXWIN <<
12852 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
12853 oldwin = (tp->rcv_adv - tp->rcv_nxt);
12857 /* We can't increase the window */
12864 * If the new window size ends up being the same as or less
12865 * than the old size when it is scaled, then don't force
12868 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
12871 if (adv >= (int32_t)(2 * segsiz) &&
12872 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
12873 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) ||
12874 so->so_rcv.sb_hiwat <= 8 * segsiz)) {
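/*
 * Concretely (illustrative numbers): with sb_hiwat = 65536 and
 * segsiz = 1448, a standalone update needs adv >= 2896
 * (2 * segsiz) and either adv >= 16384 (hiwat / 4) or
 * recwin <= 8192 (hiwat / 8); the small-buffer escape
 * (hiwat <= 8 * segsiz = 11584) is false here.
 */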
12878 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) {
12886 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW
12887 * is also a catch-all for the retransmit timer timeout case.
12889 if (tp->t_flags & TF_ACKNOW) {
12893 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) {
12898 * If our state indicates that FIN should be sent and we have not
12899 * yet done so, then we need to send.
12901 if ((flags & TH_FIN) &&
12902 (tp->snd_nxt == tp->snd_una)) {
12907 * No reason to send a segment, just return.
12910 SOCKBUF_UNLOCK(sb);
12911 just_return_nolock:
12913 int app_limited = CTF_JR_SENT_DATA;
12915 if (tot_len_this_send > 0) {
12916 /* Make sure snd_nxt is up to max */
12917 if (SEQ_GT(tp->snd_max, tp->snd_nxt))
12918 tp->snd_nxt = tp->snd_max;
12919 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz);
12921 int end_window = 0;
12922 uint32_t seq = tp->gput_ack;
12924 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
12927 * Mark the last sent that we just-returned (hinting
12928 * that delayed ack may play a role in any rtt measurement).
12930 rsm->r_just_ret = 1;
12932 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1);
12933 rack->r_ctl.rc_agg_delayed = 0;
12936 rack->r_ctl.rc_agg_early = 0;
12937 if ((ctf_outstanding(tp) +
12938 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)),
12939 minseg)) >= tp->snd_wnd) {
12940 /* We are limited by the rwnd */
12941 app_limited = CTF_JR_RWND_LIMITED;
12942 } else if (ctf_outstanding(tp) >= sbavail(sb)) {
12943 /* We are limited by what's available -- app limited */
12944 app_limited = CTF_JR_APP_LIMITED;
12945 } else if ((idle == 0) &&
12946 ((tp->t_flags & TF_NODELAY) == 0) &&
12947 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
12950 * NODELAY is not on and the
12951 * user is sending less than 1 MSS. This
12952 * brings out SWS avoidance, so we
12953 * don't send. Another app-limited case.
12955 app_limited = CTF_JR_APP_LIMITED;
12956 } else if (tp->t_flags & TF_NOPUSH) {
12958 * The user has requested no push of
12959 * the last segment and we are
12960 * at the last segment. Another app
12963 app_limited = CTF_JR_APP_LIMITED;
12964 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) {
12966 app_limited = CTF_JR_CWND_LIMITED;
12967 } else if (rack->rc_in_persist == 1) {
12968 /* We are in persists */
12969 app_limited = CTF_JR_PERSISTS;
12970 } else if (IN_RECOVERY(tp->t_flags) &&
12971 (rack->rack_no_prr == 0) &&
12972 (rack->r_ctl.rc_prr_sndcnt < segsiz)) {
12973 app_limited = CTF_JR_PRR;
12975 /* Now, why are we not sending here? */
12978 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
12981 app_limited = CTF_JR_ASSESSING;
12984 * App limited in some fashion, for our pacing GP
12985 * measurements we don't want any gap (even cwnd).
12986 * Close down the measurement window.
12988 if (rack_cwnd_block_ends_measure &&
12989 ((app_limited == CTF_JR_CWND_LIMITED) ||
12990 (app_limited == CTF_JR_PRR))) {
12992 * The reason we are not sending is
12993 * the cwnd (or prr). We have been configured
12994 * to end the measurement window in
12998 } else if (app_limited == CTF_JR_PERSISTS) {
13000 * We never end the measurement window
13001 * in persists, though in theory we
13002 * should be only entering after everything
13003 * is acknowledged (so we will probably
13004 * never come here).
13007 } else if (rack_rwnd_block_ends_measure &&
13008 (app_limited == CTF_JR_RWND_LIMITED)) {
13010 * We are rwnd limited and have been
13011 * configured to end the measurement
13012 * window in this case.
13015 } else if (app_limited == CTF_JR_APP_LIMITED) {
13017 * A true application limited period, we have
13021 } else if (app_limited == CTF_JR_ASSESSING) {
13023 * In the assessing case we hit the end of
13024 * the if/else and had no known reason.
13025 * This will panic us under invariants.
13027 * If we get this out in logs we need to
13028 * investigate which reason we missed.
13035 if ((tp->t_flags & TF_GPUTINPROG) &&
13036 SEQ_GT(tp->gput_ack, tp->snd_max)) {
13037 /* Mark the last packet as app limited */
13038 tp->gput_ack = tp->snd_max;
13041 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
13042 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
13043 if (rack->r_ctl.rc_app_limited_cnt == 0)
13044 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
13047 * Go out to the last app-limited entry and mark
13048 * this new one as next, and move the end_appl up
13051 if (rack->r_ctl.rc_end_appl)
13052 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
13053 rack->r_ctl.rc_end_appl = rsm;
13055 rsm->r_flags |= RACK_APP_LIMITED;
13056 rack->r_ctl.rc_app_limited_cnt++;
13059 rack_log_pacing_delay_calc(rack,
13060 rack->r_ctl.rc_app_limited_cnt, seq,
13061 tp->gput_ack, 0, 0, 4, __LINE__, NULL);
13065 /* set the rack tcb into the slot N */
13066 counter_u64_add(rack_paced_segments, 1);
13067 } else if (tot_len_this_send) {
13068 counter_u64_add(rack_unpaced_segments, 1);
13070 /* Check if we need to go into persists or not */
13071 if ((rack->rc_in_persist == 0) &&
13072 (tp->snd_max == tp->snd_una) &&
13073 TCPS_HAVEESTABLISHED(tp->t_state) &&
13075 (sbavail(sb) > tp->snd_wnd) &&
13076 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) {
13077 /* Yes, let's make sure to move to persist before timer-start */
13078 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
13080 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack);
13081 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use);
13083 #ifdef NETFLIX_SHARED_CWND
13084 if ((sbavail(sb) == 0) &&
13085 rack->r_ctl.rc_scw) {
13086 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
13087 rack->rack_scwnd_is_idle = 1;
13093 if ((flags & TH_FIN) &&
13096 * We do not transmit a FIN
13097 * with data outstanding. We
13098 * need to make it so all data
13103 /* Enforce stack imposed max seg size if we have one */
13104 if (rack->r_ctl.rc_pace_max_segs &&
13105 (len > rack->r_ctl.rc_pace_max_segs)) {
13107 len = rack->r_ctl.rc_pace_max_segs;
13109 SOCKBUF_LOCK_ASSERT(sb);
13112 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
13114 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
13117 * Before ESTABLISHED, force sending of initial options unless TCP
13118 * set not to do any options. NOTE: we assume that the IP/TCP header
13119 * plus TCP options always fit in a single mbuf, leaving room for a
13120 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr)
13121 * + optlen <= MCLBYTES
13126 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
13129 hdrlen = sizeof(struct tcpiphdr);
13132 * Compute options for segment. We only have to care about SYN and
13133 * established connection segments. Options for SYN-ACK segments
13134 * are handled in TCP syncache.
13137 if ((tp->t_flags & TF_NOOPT) == 0) {
13138 /* Maximum segment size. */
13139 if (flags & TH_SYN) {
13140 tp->snd_nxt = tp->iss;
13141 to.to_mss = tcp_mssopt(&inp->inp_inc);
13142 #ifdef NETFLIX_TCPOUDP
13144 to.to_mss -= V_tcp_udp_tunneling_overhead;
13146 to.to_flags |= TOF_MSS;
13149 * On SYN or SYN|ACK transmits on TFO connections,
13150 * only include the TFO option if it is not a
13151 * retransmit, as the presence of the TFO option may
13152 * have caused the original SYN or SYN|ACK to have
13153 * been dropped by a middlebox.
13155 if (IS_FASTOPEN(tp->t_flags) &&
13156 (tp->t_rxtshift == 0)) {
13157 if (tp->t_state == TCPS_SYN_RECEIVED) {
13158 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
13160 (u_int8_t *)&tp->t_tfo_cookie.server;
13161 to.to_flags |= TOF_FASTOPEN;
13163 } else if (tp->t_state == TCPS_SYN_SENT) {
13165 tp->t_tfo_client_cookie_len;
13167 tp->t_tfo_cookie.client;
13168 to.to_flags |= TOF_FASTOPEN;
13171 * If we wind up having more data to
13172 * send with the SYN than can fit in
13173 * one segment, don't send any more
13174 * until the SYN|ACK comes back from
13181 /* Window scaling. */
13182 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
13183 to.to_wscale = tp->request_r_scale;
13184 to.to_flags |= TOF_SCALE;
13187 if ((tp->t_flags & TF_RCVD_TSTMP) ||
13188 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
13189 to.to_tsval = cts + tp->ts_offset;
13190 to.to_tsecr = tp->ts_recent;
13191 to.to_flags |= TOF_TS;
13193 /* Set receive buffer autosizing timestamp. */
13194 if (tp->rfbuf_ts == 0 &&
13195 (so->so_rcv.sb_flags & SB_AUTOSIZE))
13196 tp->rfbuf_ts = tcp_ts_getticks();
13197 /* Selective ACK's. */
13198 if (flags & TH_SYN)
13199 to.to_flags |= TOF_SACKPERM;
13200 else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
13201 tp->rcv_numsacks > 0) {
13202 to.to_flags |= TOF_SACK;
13203 to.to_nsacks = tp->rcv_numsacks;
13204 to.to_sacks = (u_char *)tp->sackblks;
13206 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
13207 /* TCP-MD5 (RFC2385). */
13208 if (tp->t_flags & TF_SIGNATURE)
13209 to.to_flags |= TOF_SIGNATURE;
13210 #endif /* TCP_SIGNATURE */
13212 /* Processing the options. */
13213 hdrlen += optlen = tcp_addoptions(&to, opt);
13215 * If we wanted a TFO option to be added, but it was unable
13216 * to fit, ensure no data is sent.
13218 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie &&
13219 !(to.to_flags & TOF_FASTOPEN))
13222 #ifdef NETFLIX_TCPOUDP
13224 if (V_tcp_udp_tunneling_port == 0) {
13225 /* The port was removed?? */
13226 SOCKBUF_UNLOCK(&so->so_snd);
13227 return (EHOSTUNREACH);
13229 hdrlen += sizeof(struct udphdr);
13234 ipoptlen = ip6_optlen(tp->t_inpcb);
13237 if (tp->t_inpcb->inp_options)
13238 ipoptlen = tp->t_inpcb->inp_options->m_len -
13239 offsetof(struct ipoption, ipopt_list);
13242 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
13243 ipoptlen += ipsec_optlen;
13247 /* force TSO on so TLS offload can get the mss */
13248 if (sb->sb_flags & SB_TLS_IFNET) {
13253 * Adjust data length if insertion of options will bump the packet
13254 * length beyond the t_maxseg length. Clear the FIN bit because we
13255 * cut off the tail of the segment.
13257 if (len + optlen + ipoptlen > tp->t_maxseg) {
13259 uint32_t if_hw_tsomax;
13263 /* extract TSO information */
13264 if_hw_tsomax = tp->t_tsomax;
13265 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
13266 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
13267 KASSERT(ipoptlen == 0,
13268 ("%s: TSO can't do IP options", __func__));
13271 * Check if we should limit by maximum payload
13274 if (if_hw_tsomax != 0) {
13275 /* compute maximum TSO length */
13276 max_len = (if_hw_tsomax - hdrlen -
13278 if (max_len <= 0) {
13280 } else if (len > max_len) {
13287 * Prevent the last segment from being fractional
13288 * unless the send sockbuf can be emptied:
13290 max_len = (tp->t_maxseg - optlen);
13291 if (((sb_offset + len) < sbavail(sb)) &&
13293 moff = len % (u_int)max_len;
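/*
 * Example (illustrative): t_maxseg = 1460 with optlen = 12 gives
 * max_len = 1448; a len of 30000 leaves moff = 30000 % 1448 = 1040,
 * which the (elided) adjustment trims off so the burst is a whole
 * number of max_len segments.
 */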
13300 * In case there are too many small fragments, don't
13303 if (len <= segsiz) {
13308 * Send the FIN in a separate segment after the bulk
13309 * sending is done. We don't trust the TSO
13310 * implementations to clear the FIN flag on all but
13311 * the last segment.
13313 if (tp->t_flags & TF_NEEDFIN) {
13318 if (optlen + ipoptlen >= tp->t_maxseg) {
13320 * Since we don't have enough space to put
13321 * the IP header chain and the TCP header in
13322 * one packet as required by RFC 7112, don't
13323 * send it. Also ensure that at least one
13324 * byte of the payload can be put into the
13327 SOCKBUF_UNLOCK(&so->so_snd);
13332 len = tp->t_maxseg - optlen - ipoptlen;
13339 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
13340 ("%s: len > IP_MAXPACKET", __func__));
#ifdef INET6
    if (max_linkhdr + hdrlen > MCLBYTES)
#else
    if (max_linkhdr + hdrlen > MHLEN)
#endif
        panic("tcphdr too big");

    /*
     * This KASSERT is here to catch edge cases at a well defined place.
     * Before, those had triggered (random) panic conditions further
     * down.
     */
    KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
    if ((len == 0) &&
        (flags & TH_FIN) &&
        (sbused(sb))) {
        /*
         * We have outstanding data, don't send a FIN by itself!
         */
        goto just_return;
    }
    /*
     * Grab a header mbuf, attaching a copy of data to be transmitted,
     * and initialize the header from the template for sends on this
     * connection.
     */
    if (len) {
        if (rack->r_ctl.rc_pace_max_segs)
            max_val = rack->r_ctl.rc_pace_max_segs;
        else if (rack->rc_user_set_max_segs)
            max_val = rack->rc_user_set_max_segs * segsiz;
        else
            max_val = len;
        /*
         * We allow a limit on sending with hptsi.
         */
        if (len > max_val) {
            len = max_val;
        }
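        /*
         * Illustrative note (not from the original source): if the user
         * capped pacing at 10 segments and segsiz is 1448, max_val is
         * 14480 bytes, so a single pass never bursts more than that
         * onto the wire regardless of cwnd.
         */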
#ifdef INET6
        if (MHLEN < hdrlen + max_linkhdr)
            m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        else
#endif
            m = m_gethdr(M_NOWAIT, MT_DATA);
        if (m == NULL) {
            SOCKBUF_UNLOCK(sb);
            error = ENOBUFS;
            goto out;
        }
        m->m_data += max_linkhdr;
        m->m_len = hdrlen;

        /*
         * Start the m_copy functions from the closest mbuf to the
         * sb_offset in the socket buffer chain.
         */
        mb = sbsndptr_noadv(sb, sb_offset, &moff);
        if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) {
            m_copydata(mb, moff, (int)len,
                mtod(m, caddr_t)+hdrlen);
            if (SEQ_LT(tp->snd_nxt, tp->snd_max))
                sbsndptr_adv(sb, mb, len);
            m->m_len += len;
        } else {
            struct sockbuf *msb;

            if (SEQ_LT(tp->snd_nxt, tp->snd_max))
                msb = NULL;
            else
                msb = sb;
            m->m_next = tcp_m_copym(
                mb, moff, &len,
                if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb,
                ((rsm == NULL) ? hw_tls : 0)
#ifdef NETFLIX_COPY_ARGS
                , &filled_all
#endif
                );
            if (len <= (tp->t_maxseg - optlen)) {
                /*
                 * Must have run out of mbufs for the copy;
                 * shorten it so TSO is no longer needed. Don't
                 * set sendalot since we are low on mbufs.
                 */
                tso = 0;
            }
            if (m->m_next == NULL) {
                SOCKBUF_UNLOCK(sb);
                (void)m_free(m);
                error = ENOBUFS;
                goto out;
            }
        }
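        /*
         * Illustrative note (not from the original source): the
         * small-send fast path above copies the payload directly into
         * the header mbuf when it fits; otherwise tcp_m_copym() builds
         * a chain that can reference the socket buffer's clusters
         * instead of copying them.
         */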
        if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
            if (rsm && (rsm->r_flags & RACK_TLP)) {
                /*
                 * TLP should not count in retran count, but
                 * in its own bin.
                 */
                counter_u64_add(rack_tlp_retran, 1);
                counter_u64_add(rack_tlp_retran_bytes, len);
            } else {
                tp->t_sndrexmitpack++;
                KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
                KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
            }
#ifdef STATS
            stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
                len);
#endif
        } else {
            KMOD_TCPSTAT_INC(tcps_sndpack);
            KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
#ifdef STATS
            stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
                len);
#endif
        }
        /*
         * If we're sending everything we've got, set PUSH. (This
         * will keep happy those implementations which only give
         * data to the user when a buffer fills or a PUSH comes in.)
         */
        if (sb_offset + len == sbused(sb) &&
            sbused(sb) &&
            !(flags & TH_SYN))
            flags |= TH_PUSH;
        SOCKBUF_UNLOCK(sb);
    } else {
        /* len == 0: pure ACK or control segment. */
        SOCKBUF_UNLOCK(sb);
        if (tp->t_flags & TF_ACKNOW)
            KMOD_TCPSTAT_INC(tcps_sndacks);
        else if (flags & (TH_SYN | TH_FIN | TH_RST))
            KMOD_TCPSTAT_INC(tcps_sndctrl);
        else
            KMOD_TCPSTAT_INC(tcps_sndwinup);

        m = m_gethdr(M_NOWAIT, MT_DATA);
        if (m == NULL) {
            error = ENOBUFS;
            goto out;
        }
#ifdef INET6
        if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
            MHLEN >= hdrlen) {
            M_ALIGN(m, hdrlen);
        } else
#endif
            m->m_data += max_linkhdr;
        m->m_len = hdrlen;
    }
    SOCKBUF_UNLOCK_ASSERT(sb);
    m->m_pkthdr.rcvif = (struct ifnet *)0;
#ifdef MAC
    mac_inpcb_create_mbuf(inp, m);
#endif
#ifdef INET6
    if (isipv6) {
        ip6 = mtod(m, struct ip6_hdr *);
#ifdef NETFLIX_TCPOUDP
        if (tp->t_port) {
            udp = (struct udphdr *)((caddr_t)ip6 + ipoptlen + sizeof(struct ip6_hdr));
            udp->uh_sport = htons(V_tcp_udp_tunneling_port);
            udp->uh_dport = tp->t_port;
            ulen = hdrlen + len - sizeof(struct ip6_hdr);
            udp->uh_ulen = htons(ulen);
            th = (struct tcphdr *)(udp + 1);
        } else
#endif
            th = (struct tcphdr *)(ip6 + 1);
        tcpip_fillheaders(inp,
#ifdef NETFLIX_TCPOUDP
            tp->t_port,
#endif
            ip6, th);
    } else
#endif /* INET6 */
    {
        ip = mtod(m, struct ip *);
#ifdef TCPDEBUG
        ipov = (struct ipovly *)ip;
#endif
#ifdef NETFLIX_TCPOUDP
        if (tp->t_port) {
            udp = (struct udphdr *)((caddr_t)ip + ipoptlen + sizeof(struct ip));
            udp->uh_sport = htons(V_tcp_udp_tunneling_port);
            udp->uh_dport = tp->t_port;
            ulen = hdrlen + len - sizeof(struct ip);
            udp->uh_ulen = htons(ulen);
            th = (struct tcphdr *)(udp + 1);
        } else
#endif
            th = (struct tcphdr *)(ip + 1);
        tcpip_fillheaders(inp,
#ifdef NETFLIX_TCPOUDP
            tp->t_port,
#endif
            ip, th);
    }
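    /*
     * Illustrative note (not from the original source): with UDP
     * tunneling active the segment on the wire is
     * [IP][UDP][TCP][options][data]; uh_ulen covers everything after
     * the IP header, e.g. hdrlen 60 (IPv4 20 + UDP 8 + TCP 20 +
     * options 12) and len 1448 give ulen = 60 + 1448 - 20 = 1488.
     */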
    /*
     * Fill in fields, remembering maximum advertised window for use in
     * delaying messages about window sizes. If resending a FIN, be sure
     * not to use a new sequence number.
     */
    if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
        tp->snd_nxt == tp->snd_max)
        tp->snd_nxt--;
    /*
     * If we are starting a connection, send ECN setup SYN packet. If we
     * are on a retransmit, we may resend those bits a number of times
     * as per RFC 3168.
     */
    if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) {
        if (tp->t_rxtshift >= 1) {
            if (tp->t_rxtshift <= V_tcp_ecn_maxretries)
                flags |= TH_ECE | TH_CWR;
        } else
            flags |= TH_ECE | TH_CWR;
    }
    /* Handle parallel SYN for ECN */
    if ((tp->t_state == TCPS_SYN_RECEIVED) &&
        (tp->t_flags2 & TF2_ECN_SND_ECE)) {
        flags |= TH_ECE;
        tp->t_flags2 &= ~TF2_ECN_SND_ECE;
    }
    if (tp->t_state == TCPS_ESTABLISHED &&
        (tp->t_flags2 & TF2_ECN_PERMIT)) {
        /*
         * If the peer has ECN, mark data packets with ECN capable
         * transmission (ECT). Ignore pure ack packets and
         * retransmissions.
         */
        if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) &&
            (sack_rxmit == 0)) {
#ifdef INET6
            if (isipv6)
                ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
            else
#endif
                ip->ip_tos |= IPTOS_ECN_ECT0;
            KMOD_TCPSTAT_INC(tcps_ecn_ect0);
        }
        /*
         * Reply with proper ECN notifications.
         * Only set CWR on new data segments.
         */
        if (tp->t_flags2 & TF2_ECN_SND_CWR) {
            flags |= TH_CWR;
            tp->t_flags2 &= ~TF2_ECN_SND_CWR;
        }
        if (tp->t_flags2 & TF2_ECN_SND_ECE)
            flags |= TH_ECE;
    }
    /*
     * If we are doing retransmissions, then snd_nxt will not reflect
     * the first unsent octet. For ACK only packets, we do not want the
     * sequence number of the retransmitted packet, we want the sequence
     * number of the next unsent octet. So, if there is no data (and no
     * SYN or FIN), use snd_max instead of snd_nxt when filling in
     * ti_seq. But if we are in persist state, snd_max might reflect
     * one byte beyond the right edge of the window, so use snd_nxt in
     * that case, since we know we aren't doing a retransmission.
     * (retransmit and persist are mutually exclusive...)
     */
    if (sack_rxmit == 0) {
        if (len || (flags & (TH_SYN | TH_FIN)) ||
            rack->rc_in_persist) {
            th->th_seq = htonl(tp->snd_nxt);
            rack_seq = tp->snd_nxt;
        } else if (flags & TH_RST) {
            /*
             * For a Reset send the last cum ack in sequence
             * (this, like any other choice, may still generate a
             * challenge ack if an ack-update packet is in
             * flight).
             */
            th->th_seq = htonl(tp->snd_una);
            rack_seq = tp->snd_una;
        } else {
            th->th_seq = htonl(tp->snd_max);
            rack_seq = tp->snd_max;
        }
    } else {
        th->th_seq = htonl(rsm->r_start);
        rack_seq = rsm->r_start;
    }
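    /*
     * Summary of the sequence-number choice above (not from the
     * original source): SACK-driven retransmits use the rsm's r_start;
     * otherwise data/SYN/FIN/persist sends use snd_nxt, an RST uses
     * snd_una, and a pure ACK uses snd_max.
     */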
    th->th_ack = htonl(tp->rcv_nxt);
    if (optlen) {
        bcopy(opt, th + 1, optlen);
        th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
    }
    th->th_flags = flags;
    /*
     * Calculate receive window. Don't shrink window, but avoid silly
     * window syndrome.
     * If a RST segment is sent, advertise a window of zero.
     */
    if (flags & TH_RST) {
        recwin = 0;
    } else {
        if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
            recwin < (long)segsiz)
            recwin = 0;
        if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
            recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
            recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
    }

    /*
     * According to RFC1323 the window field in a SYN (i.e., a <SYN> or
     * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is
     * handled in syncache.
     */
    if (flags & TH_SYN)
        th->th_win = htons((u_short)
            (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
    else {
        /* Avoid shrinking window with window scaling. */
        recwin = roundup2(recwin, 1 << tp->rcv_scale);
        th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
    }
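    /*
     * Worked example (not from the original source): with
     * rcv_scale == 7 a recwin of 100000 bytes is rounded up to the
     * next multiple of 128 (100096) and advertised as
     * th_win = 100096 >> 7 = 782, so the peer reconstructs exactly
     * 782 << 7 = 100096 and the window never appears to shrink due
     * to scaling truncation.
     */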
    /*
     * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0
     * window. This may cause the remote transmitter to stall. This
     * flag tells soreceive() to disable delayed acknowledgements when
     * draining the buffer. This can occur if the receiver is
     * attempting to read more data than can be buffered prior to
     * transmitting on the connection.
     */
    if (th->th_win == 0) {
        tp->t_sndzerowin++;
        tp->t_flags |= TF_RXWIN0SENT;
    } else
        tp->t_flags &= ~TF_RXWIN0SENT;
    tp->snd_up = tp->snd_una;	/* drag it along, it's deprecated */
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
    if (to.to_flags & TOF_SIGNATURE) {
        /*
         * Calculate MD5 signature and put it into the place
         * determined before.
         * NOTE: since TCP options buffer doesn't point into
         * mbuf's data, calculate offset and use it.
         */
        if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
            (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
            /*
             * Do not send segment if the calculation of MD5
             * digest has failed.
             */
            goto out;
        }
    }
#endif
    /*
     * Put TCP length in extended header, and then checksum extended
     * header and data.
     */
    m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
#ifdef INET6
    if (isipv6) {
        /*
         * ip6_plen does not need to be filled in now; it will be
         * filled in later.
         */
        if (tp->t_port) {
            m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
            m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
            udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
            th->th_sum = htons(0);
            UDPSTAT_INC(udps_opackets);
        } else {
            m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
            m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
            th->th_sum = in6_cksum_pseudo(ip6,
                sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
                0);
        }
    }
#endif
#if defined(INET6) && defined(INET)
    else
#endif
#ifdef INET
    {
        if (tp->t_port) {
            m->m_pkthdr.csum_flags = CSUM_UDP;
            m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
            udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
                ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
            th->th_sum = htons(0);
            UDPSTAT_INC(udps_opackets);
        } else {
            m->m_pkthdr.csum_flags = CSUM_TCP;
            m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
            th->th_sum = in_pseudo(ip->ip_src.s_addr,
                ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
                IPPROTO_TCP + len + optlen));
        }
        /* IP version must be set here for ipv4/ipv6 checking later */
        KASSERT(ip->ip_v == IPVERSION,
            ("%s: IP version incorrect: %d", __func__, ip->ip_v));
    }
#endif
    /*
     * Enable TSO and specify the size of the segments. The TCP pseudo
     * header checksum is always provided. XXX: Fixme: This is currently
     * not the case for IPv6.
     */
    if (tso || force_tso) {
        KASSERT(force_tso || len > tp->t_maxseg - optlen,
            ("%s: len <= tso_segsz", __func__));
        m->m_pkthdr.csum_flags |= CSUM_TSO;
        m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
    }
    KASSERT(len + hdrlen == m_length(m, NULL),
        ("%s: mbuf chain different than expected: %d + %u != %u",
        __func__, len, hdrlen, m_length(m, NULL)));
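    /*
     * Illustrative note (not from the original source): with
     * t_maxseg 1460 and 12 bytes of options, tso_segsz is 1448 and the
     * driver slices the chain into wire frames carrying at most that
     * much payload each, replicating the headers built above.
     */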
    /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
    hhook_run_tcp_est_out(tp, th, &to, len, tso);
#ifdef TCPDEBUG
    /*
     * Trace.
     */
    if (so->so_options & SO_DEBUG) {
        u_short save;

#ifdef INET6
        if (!isipv6)
#endif
        {
            save = ipov->ih_len;
            ipov->ih_len = htons(m->m_pkthdr.len /* - hdrlen +
                                                  * (th->th_off << 2) */ );
        }
        tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
#ifdef INET6
        if (!isipv6)
#endif
            ipov->ih_len = save;
    }
#endif /* TCPDEBUG */
    /* We're getting ready to send; log now. */
    if (tp->t_logstate != TCP_LOG_STATE_OFF) {
        union tcp_log_stackspecific log;
        struct timeval tv;

        memset(&log.u_bbr, 0, sizeof(log.u_bbr));
        log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
        log.u_bbr.ininput = rack->rc_inp->inp_in_input;
        if (rack->rack_no_prr)
            log.u_bbr.flex1 = 0;
        else
            log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
        log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
        log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
        log.u_bbr.flex4 = orig_len;
        if (filled_all)
            log.u_bbr.flex5 = 0x80000000;
        else
            log.u_bbr.flex5 = 0;
        /* Save off the early/late values */
        log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
        log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
        log.u_bbr.bw_inuse = rack_get_bw(rack);
        if (rsm || sack_rxmit) {
            if (doing_tlp)
                log.u_bbr.flex8 = 2;
            else
                log.u_bbr.flex8 = 1;
        } else {
            log.u_bbr.flex8 = 0;
        }
        log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
        log.u_bbr.flex7 = mark;
        log.u_bbr.pkts_out = tp->t_maxseg;
        log.u_bbr.timeStamp = tcp_get_usecs(&tv);
        log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
        log.u_bbr.lt_epoch = cwnd_to_use;
        log.u_bbr.delivered = sendalot;
        lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
            len, &log, false, NULL, NULL, 0, &tv);
    } else
        lgb = NULL;
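    /*
     * Note (not from the original source): the flex* fields above are a
     * stack-specific encoding; they are only meaningful to userland
     * tools that read the TCP black-box log and know this layout.
     */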
    /*
     * Fill in IP length and desired time to live and send to IP level.
     * There should be a better way to handle ttl and tos; we could keep
     * them in the template, but need a way to checksum without them.
     */
    /*
     * m->m_pkthdr.len should have been set before checksum calculation,
     * because in6_cksum() needs it.
     */
#ifdef INET6
    if (isipv6) {
        /*
         * We set the hoplimit separately for every segment, since the
         * user might want to change the value via setsockopt. Also,
         * the desired default hop limit might be changed via Neighbor
         * Discovery.
         */
        ip6->ip6_hlim = in6_selecthlim(inp, NULL);

        /*
         * Set the packet size here for the benefit of DTrace
         * probes. ip6_output() will set it properly; it's supposed
         * to include the option header lengths as well.
         */
        ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));

        if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
            tp->t_flags2 |= TF2_PLPMTU_PMTUD;
        else
            tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;

        if (tp->t_state == TCPS_SYN_SENT)
            TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);
        else
            TCP_PROBE5(send, NULL, tp, ip6, tp, th);

        /* TODO: IPv6 IP6TOS_ECT bit on */
        error = ip6_output(m, inp->in6p_outputopts,
            &inp->inp_route6,
            ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0),
            NULL, NULL, inp);

        if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
            mtu = inp->inp_route6.ro_nh->nh_mtu;
    }
#endif /* INET6 */
#if defined(INET) && defined(INET6)
    else
#endif
#ifdef INET
    {
        ip->ip_len = htons(m->m_pkthdr.len);
#ifdef INET6
        if (inp->inp_vflag & INP_IPV6PROTO)
            ip->ip_ttl = in6_selecthlim(inp, NULL);
#endif /* INET6 */
        /*
         * If we do path MTU discovery, then we set DF on every
         * packet. This might not be the best thing to do according
         * to RFC3390 Section 2. However the tcp hostcache mitigates
         * the problem so it affects only the first tcp connection
         * with a host.
         *
         * NB: Don't set DF on small MTU/MSS to have a safe
         * fallback.
         */
        if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
            tp->t_flags2 |= TF2_PLPMTU_PMTUD;
            if (tp->t_port == 0 || len < V_tcp_minmss) {
                ip->ip_off |= htons(IP_DF);
            }
        } else {
            tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
        }

        if (tp->t_state == TCPS_SYN_SENT)
            TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);
        else
            TCP_PROBE5(send, NULL, tp, ip, tp, th);

        error = ip_output(m, inp->inp_options, &inp->inp_route,
            ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0,
            inp);
        if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
            mtu = inp->inp_route.ro_nh->nh_mtu;
    }
#endif /* INET */

out:
    if (lgb) {
        lgb->tlb_errno = error;
        lgb = NULL;
    }
    /*
     * In transmit state, time the transmission and arrange for the
     * retransmit. In persist state, just set snd_max.
     */
    if (error == 0) {
        rack->forced_ack = 0;	/* If we send something zap the FA flag */
        if (rsm && (doing_tlp == 0)) {
            /* Set we retransmitted */
            rack->rc_gp_saw_rec = 1;
        } else {
            if (cwnd_to_use > tp->snd_ssthresh) {
                /* Set we sent in CA */
                rack->rc_gp_saw_ca = 1;
            } else {
                /* Set we sent in SS */
                rack->rc_gp_saw_ss = 1;
            }
        }
        if (TCPS_HAVEESTABLISHED(tp->t_state) &&
            (tp->t_flags & TF_SACK_PERMIT) &&
            tp->rcv_numsacks > 0)
            tcp_clean_dsack_blocks(tp);
        tot_len_this_send += len;
        if (len == 0)
            counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1);
        else if (len == 1) {
            counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1);
        } else if (len > 1) {
            int idx;

            idx = (len / segsiz) + 3;
            if (idx >= TCP_MSS_ACCT_ATIMER)
                counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
            else
                counter_u64_add(rack_out_size[idx], 1);
        }
        if (hw_tls && len > 0) {
            if (filled_all) {
                counter_u64_add(rack_tls_filled, 1);
                rack_log_type_hrdwtso(tp, rack, len, 0, orig_len, 1);
            } else {
                if (rsm) {
                    counter_u64_add(rack_tls_rxt, 1);
                    rack_log_type_hrdwtso(tp, rack, len, 2, orig_len, 1);
                } else if (doing_tlp) {
                    counter_u64_add(rack_tls_tlp, 1);
                    rack_log_type_hrdwtso(tp, rack, len, 3, orig_len, 1);
                } else if ((ctf_outstanding(tp) + minseg) > sbavail(sb)) {
                    counter_u64_add(rack_tls_app, 1);
                    rack_log_type_hrdwtso(tp, rack, len, 4, orig_len, 1);
                } else if ((ctf_flight_size(tp, rack->r_ctl.rc_sacked) + minseg) > cwnd_to_use) {
                    counter_u64_add(rack_tls_cwnd, 1);
                    rack_log_type_hrdwtso(tp, rack, len, 5, orig_len, 1);
                } else if ((ctf_outstanding(tp) + minseg) > tp->snd_wnd) {
                    counter_u64_add(rack_tls_rwnd, 1);
                    rack_log_type_hrdwtso(tp, rack, len, 6, orig_len, 1);
                } else {
                    rack_log_type_hrdwtso(tp, rack, len, 7, orig_len, 1);
                    counter_u64_add(rack_tls_other, 1);
                }
            }
        }
    }
    if (rack->rack_no_prr == 0) {
        if (sub_from_prr && (error == 0)) {
            if (rack->r_ctl.rc_prr_sndcnt >= len)
                rack->r_ctl.rc_prr_sndcnt -= len;
            else
                rack->r_ctl.rc_prr_sndcnt = 0;
        }
    }
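    /*
     * Illustrative note (not from the original source): during PRR a
     * send is debited from the budget above, e.g. rc_prr_sndcnt of
     * 2896 drops to 1448 after one full segment goes out, and bottoms
     * out at zero rather than wrapping.
     */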
    sub_from_prr = 0;
    rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, cts,
        pass, rsm, us_cts);
    if ((error == 0) &&
        (len > 0) &&
        (tp->snd_una == tp->snd_max))
        rack->r_ctl.rc_tlp_rxt_last_time = cts;
    /* Now are we in persists? */
    if (rack->rc_in_persist == 0) {
        tcp_seq startseq = tp->snd_nxt;

        /* Track our lost count */
        if (rsm && (doing_tlp == 0))
            rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start;
        /*
         * Advance snd_nxt over sequence space of this segment.
         */
        if (error)
            /* We don't log or do anything with errors */
            goto nomore;
        if (doing_tlp == 0) {
            if (rsm == NULL) {
                /*
                 * Not a retransmission of some
                 * sort, new data is going out so
                 * clear our TLP count and flag.
                 */
                rack->rc_tlp_in_progress = 0;
                rack->r_ctl.rc_tlp_cnt_out = 0;
            }
        } else {
            /*
             * We have just sent a TLP, mark that it is true
             * and make sure our in progress is set so we
             * continue to check the count.
             */
            rack->rc_tlp_in_progress = 1;
            rack->r_ctl.rc_tlp_cnt_out++;
        }
        if (flags & (TH_SYN | TH_FIN)) {
            if (flags & TH_SYN)
                tp->snd_nxt++;
            if (flags & TH_FIN) {
                tp->snd_nxt++;
                tp->t_flags |= TF_SENTFIN;
            }
        }
        /* In the ENOBUFS case we do *not* update snd_max */
        tp->snd_nxt += len;
        if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
            if (tp->snd_una == tp->snd_max) {
                /*
                 * Update the time we just added data since
                 * none was outstanding.
                 */
                rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
                tp->t_acktime = ticks;
            }
            tp->snd_max = tp->snd_nxt;
            /*
             * Time this transmission if not a retransmission and
             * not currently timing anything.
             * This is only relevant in case of switching back to
             * the base stack.
             */
            if (tp->t_rtttime == 0) {
                tp->t_rtttime = ticks;
                tp->t_rtseq = startseq;
                KMOD_TCPSTAT_INC(tcps_segstimed);
            }
            if (len &&
                ((tp->t_flags & TF_GPUTINPROG) == 0))
                rack_start_gp_measurement(tp, rack, startseq, sb_offset);
        }
    } else {
        /*
         * Persist case, update snd_max but since we are in persist
         * mode (no window) we do not update snd_nxt.
         */
        int32_t xlen = len;

        if (error)
            goto nomore;

        if (flags & TH_SYN)
            ++xlen;
        if (flags & TH_FIN) {
            ++xlen;
            tp->t_flags |= TF_SENTFIN;
        }
        /* In the ENOBUFS case we do *not* update snd_max */
        if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max)) {
            if (tp->snd_una == tp->snd_max) {
                /*
                 * Update the time we just added data since
                 * none was outstanding.
                 */
                rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
                tp->t_acktime = ticks;
            }
            tp->snd_max = tp->snd_nxt + len;
        }
    }
nomore:
    if (error) {
        rack->r_ctl.rc_agg_delayed = 0;
        rack->r_early = 0;
        rack->r_late = 0;
        rack->r_ctl.rc_agg_early = 0;
        SOCKBUF_UNLOCK_ASSERT(sb);	/* Check gotos. */
        /*
         * Failures do not advance the seq counter above. For the
         * case of ENOBUFS we will fall out and retry in 1ms with
         * the hpts. Everything else will just have to retransmit
         * with the timer.
         *
         * In any case, we do not want to loop around for another
         * send without a good reason.
         */
        sendalot = 0;
        switch (error) {
        case EPERM:
            tp->t_softerror = error;
            return (error);
        case ENOBUFS:
            if (slot == 0) {
                /*
                 * Pace us right away to retry in some
                 * amount of time.
                 */
                slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
                if (rack->rc_enobuf < 126)
                    rack->rc_enobuf++;
                if (slot > ((rack->rc_rack_rtt / 2) * HPTS_USEC_IN_MSEC)) {
                    slot = (rack->rc_rack_rtt / 2) * HPTS_USEC_IN_MSEC;
                }
                if (slot < (10 * HPTS_USEC_IN_MSEC))
                    slot = 10 * HPTS_USEC_IN_MSEC;
            }
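            /*
             * Illustrative note (not from the original source): on the
             * first ENOBUFS rc_enobuf is 0, so the raw backoff is 1 ms;
             * the clamps above then keep the retry between 10 ms and
             * half the measured RACK RTT (in HPTS microsecond units).
             */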
            counter_u64_add(rack_saw_enobuf, 1);
            error = 0;
            goto enobufs;
        case EMSGSIZE:
            /*
             * For some reason the interface we used initially
             * to send segments changed to another or lowered
             * its MTU. If TSO was active we either got an
             * interface without TSO capabilities or TSO was
             * turned off. If we obtained mtu from ip_output()
             * then update it and try again.
             */
            if (tso)
                tp->t_flags &= ~TF_TSO;
            if (mtu != 0) {
                tcp_mss_update(tp, -1, mtu, NULL, NULL);
                goto again;
            }
            slot = 10 * HPTS_USEC_IN_MSEC;
            rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
            return (error);
        case ENETUNREACH:
            counter_u64_add(rack_saw_enetunreach, 1);
        case EHOSTDOWN:
        case EHOSTUNREACH:
        case ENETDOWN:
            if (TCPS_HAVERCVDSYN(tp->t_state)) {
                tp->t_softerror = error;
            }
            /* FALLTHROUGH */
        default:
            slot = 10 * HPTS_USEC_IN_MSEC;
            rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
            return (error);
        }
    } else {
        rack->rc_enobuf = 0;
    }
    KMOD_TCPSTAT_INC(tcps_sndtotal);

    /*
     * Data sent (as far as we can tell). If this advertises a larger
     * window than any other segment, then remember the size of the
     * advertised window. Any pending ACK has now been sent.
     */
    if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
        tp->rcv_adv = tp->rcv_nxt + recwin;
    tp->last_ack_sent = tp->rcv_nxt;
    tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
enobufs:
    /* Assure when we leave that snd_nxt will point to top */
    if (SEQ_GT(tp->snd_max, tp->snd_nxt))
        tp->snd_nxt = tp->snd_max;
    /* Do we need to turn off sendalot? */
    if (rack->r_ctl.rc_pace_max_segs &&
        (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) {
        /* We hit our max. */
        sendalot = 0;
    } else if ((rack->rc_user_set_max_segs) &&
        (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) {
        /* We hit the user defined max */
        sendalot = 0;
    }
    if ((error == 0) && (flags & TH_FIN))
        tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN);
    if (flags & TH_RST) {
        /*
         * We don't send again after sending a RST.
         */
        slot = 0;
        sendalot = 0;
        if (error == 0)
            tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
    } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) {
        /*
         * Get our pacing rate; if an error
         * occurred in sending (ENOBUFS) we would
         * hit the else-if with slot preset. Other
         * errors return.
         */
        slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz);
    }
    if (rsm &&
        rack->use_rack_rr) {
        /* It's a retransmit and we use the rack cheat? */
        if ((slot == 0) ||
            (rack->rc_always_pace == 0) ||
            (rack->r_rr_config == 1)) {
            /*
             * We have no pacing set or we
             * are using old-style rack or
             * we are overridden to use the old 1ms pacing.
             */
            slot = rack->r_ctl.rc_min_to * HPTS_USEC_IN_MSEC;
        }
    }
    if (slot) {
        /* set the rack tcb into the slot N */
        counter_u64_add(rack_paced_segments, 1);
    } else if (sendalot) {
        if (len)
            counter_u64_add(rack_unpaced_segments, 1);
        sack_rxmit = 0;
        goto again;
    } else if (len) {
        counter_u64_add(rack_unpaced_segments, 1);
    }
    rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0);
    return (error);
}
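/*
 * Re-evaluate the pacing segment size after a rate-affecting change and
 * log the adjustment only if rc_pace_max_segs actually moved.
 */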
static void
rack_update_seg(struct tcp_rack *rack)
{
    uint32_t orig_val;

    orig_val = rack->r_ctl.rc_pace_max_segs;
    rack_set_pace_segments(rack->rc_tp, rack, __LINE__);
    if (orig_val != rack->r_ctl.rc_pace_max_segs)
        rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL);
}
/*
 * rack_ctloutput() must drop the inpcb lock before performing copyin on
 * socket option arguments. When it re-acquires the lock after the copy, it
 * has to revalidate that the connection is still valid for the socket
 * option.
 */
static int
rack_set_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
{
    struct epoch_tracker et;
    uint64_t val;
    int32_t error = 0, optval;
    uint16_t ca, ss;
    switch (sopt->sopt_name) {
    case TCP_RACK_PROP_RATE: /* URL:prop_rate */
    case TCP_RACK_PROP: /* URL:prop */
    case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */
    case TCP_RACK_EARLY_RECOV: /* URL:early_recov */
    case TCP_RACK_PACE_REDUCE: /* Not used */
    /* Pacing related ones */
    case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */
    case TCP_BBR_RACK_INIT_RATE: /* URL:irate */
    case TCP_BBR_IWINTSO: /* URL:tso_iwin */
    case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */
    case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */
    case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */
    case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss */
    case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */
    case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */
    case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */
    case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */
    case TCP_RACK_RR_CONF: /* URL:rrr_conf */
    case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */
    /* End pacing related */
    case TCP_DELACK:
    case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */
    case TCP_RACK_MIN_TO: /* URL:min_to */
    case TCP_RACK_EARLY_SEG: /* URL:early_seg */
    case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */
    case TCP_RACK_REORD_FADE: /* URL:reord_fade */
    case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */
    case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */
    case TCP_RACK_TLP_USE: /* URL:tlp_use */
    case TCP_RACK_TLP_INC_VAR: /* URL:tlp_inc_var */
    case TCP_RACK_IDLE_REDUCE_HIGH: /* URL:idle_reduce_high */
    case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */
    case TCP_BBR_USE_RACK_RR: /* URL:rackrr */
    case TCP_RACK_DO_DETECTION: /* URL:detect */
    case TCP_NO_PRR: /* URL:noprr */
    case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */
    case TCP_DATA_AFTER_CLOSE:
    case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */
    case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */
    case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */
    case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */
    case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */
    case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */
    case TCP_RACK_PROFILE: /* URL:profile */
        break;
    default:
        return (tcp_default_ctloutput(so, sopt, inp, tp));
    }
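    /*
     * Illustrative usage sketch (not from the original source): once a
     * connection has been switched to this stack (e.g. via the
     * TCP_FUNCTION_BLK socket option), the knobs above are set as plain
     * TCP-level socket options from userland:
     *
     *	int one = 1;
     *	setsockopt(fd, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS,
     *	    &one, sizeof(one));
     */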
    INP_WUNLOCK(inp);
    error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
    if (error)
        return (error);
    INP_WLOCK(inp);
    if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
        INP_WUNLOCK(inp);
        return (ECONNRESET);
    }
    tp = intotcpcb(inp);
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    switch (sopt->sopt_name) {
    case TCP_RACK_PROFILE:
        RACK_OPTS_INC(tcp_profile);
        if (optval == 1) {
            /* pace_always=1 */
            rack->rc_always_pace = 1;
            tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
            /* scwnd=1 */
            rack->rack_enable_scwnd = 1;
            /* dynamic=100 */
            rack->rc_gp_dyn_mul = 1;
            rack->r_ctl.rack_per_of_gp_ca = 100;
            /* rrr_conf=3 */
            rack->r_rr_config = 3;
            /* npush=2 */
            rack->r_ctl.rc_no_push_at_mrtt = 2;
            /* fillcw=1 */
            rack->rc_pace_to_cwnd = 1;
            rack->rc_pace_fill_if_rttin_range = 0;
            rack->rtt_limit_mul = 0;
            /* noprr=1 */
            rack->rack_no_prr = 1;
            /* lscwnd=1 */
            rack->r_limit_scw = 1;
        } else if (optval == 2) {
            /* pace_always=1 */
            rack->rc_always_pace = 1;
            tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
            /* scwnd=1 */
            rack->rack_enable_scwnd = 1;
            /* dynamic=100 */
            rack->rc_gp_dyn_mul = 1;
            rack->r_ctl.rack_per_of_gp_ca = 100;
            /* rrr_conf=3 */
            rack->r_rr_config = 3;
            /* npush=2 */
            rack->r_ctl.rc_no_push_at_mrtt = 2;
            /* fillcw=1 */
            rack->rc_pace_to_cwnd = 1;
            rack->rc_pace_fill_if_rttin_range = 0;
            rack->rtt_limit_mul = 0;
            /* noprr=1 */
            rack->rack_no_prr = 1;
            /* lscwnd=0 */
            rack->r_limit_scw = 0;
        }
        break;
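    /*
     * Note (not from the original source): as the code above shows,
     * profiles 1 and 2 apply the same batch of settings and differ only
     * in whether the shared cwnd is time limited (r_limit_scw). Each
     * individual knob below sets the same fields one at a time.
     */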
    case TCP_SHARED_CWND_TIME_LIMIT:
        RACK_OPTS_INC(tcp_lscwnd);
        if (optval)
            rack->r_limit_scw = 1;
        else
            rack->r_limit_scw = 0;
        break;
    case TCP_RACK_PACE_TO_FILL:
        RACK_OPTS_INC(tcp_fillcw);
        if (optval == 0)
            rack->rc_pace_to_cwnd = 0;
        else
            rack->rc_pace_to_cwnd = 1;
        if ((optval >= rack_gp_rtt_maxmul) &&
            rack_gp_rtt_maxmul &&
            (optval < 0xf)) {
            rack->rc_pace_fill_if_rttin_range = 1;
            rack->rtt_limit_mul = optval;
        } else {
            rack->rc_pace_fill_if_rttin_range = 0;
            rack->rtt_limit_mul = 0;
        }
        break;
    case TCP_RACK_NO_PUSH_AT_MAX:
        RACK_OPTS_INC(tcp_npush);
        if (optval == 0)
            rack->r_ctl.rc_no_push_at_mrtt = 0;
        else if (optval < 0xff)
            rack->r_ctl.rc_no_push_at_mrtt = optval;
        else
            error = EINVAL;
        break;
    case TCP_SHARED_CWND_ENABLE:
        RACK_OPTS_INC(tcp_rack_scwnd);
        if (optval == 0)
            rack->rack_enable_scwnd = 0;
        else
            rack->rack_enable_scwnd = 1;
        break;
    case TCP_RACK_MBUF_QUEUE:
        /* Now do we use the LRO mbuf-queue feature */
        RACK_OPTS_INC(tcp_rack_mbufq);
        if (optval)
            rack->r_mbuf_queue = 1;
        else
            rack->r_mbuf_queue = 0;
        if (rack->r_mbuf_queue || rack->rc_always_pace)
            tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
        else
            tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
        break;
    case TCP_RACK_NONRXT_CFG_RATE:
        RACK_OPTS_INC(tcp_rack_cfg_rate);
        if (optval == 0)
            rack->rack_rec_nonrxt_use_cr = 0;
        else
            rack->rack_rec_nonrxt_use_cr = 1;
        break;
    case TCP_NO_PRR:
        RACK_OPTS_INC(tcp_rack_noprr);
        if (optval == 0)
            rack->rack_no_prr = 0;
        else
            rack->rack_no_prr = 1;
        break;
    case TCP_TIMELY_DYN_ADJ:
        RACK_OPTS_INC(tcp_timely_dyn);
        if (optval == 0)
            rack->rc_gp_dyn_mul = 0;
        else {
            rack->rc_gp_dyn_mul = 1;
            if (optval >= 100) {
                /*
                 * If the user sets something of 100 or
                 * more, it is the gp_ca value.
                 */
                rack->r_ctl.rack_per_of_gp_ca = optval;
            }
        }
        break;
    case TCP_RACK_DO_DETECTION:
        RACK_OPTS_INC(tcp_rack_do_detection);
        if (optval == 0)
            rack->do_detection = 0;
        else
            rack->do_detection = 1;
        break;
    case TCP_RACK_PROP_RATE:
        if ((optval <= 0) || (optval >= 100)) {
            error = EINVAL;
            break;
        }
        RACK_OPTS_INC(tcp_rack_prop_rate);
        rack->r_ctl.rc_prop_rate = optval;
        break;
    case TCP_RACK_TLP_USE:
        if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) {
            error = EINVAL;
            break;
        }
        RACK_OPTS_INC(tcp_tlp_use);
        rack->rack_tlp_threshold_use = optval;
        break;
    case TCP_RACK_PROP:
        /* RACK proportional rate reduction (bool) */
        RACK_OPTS_INC(tcp_rack_prop);
        rack->r_ctl.rc_prop_reduce = optval;
        break;
    case TCP_RACK_TLP_REDUCE:
        /* RACK TLP cwnd reduction (bool) */
        RACK_OPTS_INC(tcp_rack_tlp_reduce);
        rack->r_ctl.rc_tlp_cwnd_reduce = optval;
        break;
    case TCP_RACK_EARLY_RECOV:
        /* Should recovery happen early (bool) */
        RACK_OPTS_INC(tcp_rack_early_recov);
        rack->r_ctl.rc_early_recovery = optval;
        break;
    /* Pacing related ones */
    case TCP_RACK_PACE_ALWAYS:
        /*
         * Zero is the old rack method, 1 is the new
         * method using a pacing rate.
         */
        RACK_OPTS_INC(tcp_rack_pace_always);
        if (optval > 0)
            rack->rc_always_pace = 1;
        else
            rack->rc_always_pace = 0;
        if (rack->r_mbuf_queue || rack->rc_always_pace)
            tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
        else
            tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
        /* A rate may already be set (irate or other); if so, set the seg size */
        rack_update_seg(rack);
        break;
    case TCP_BBR_RACK_INIT_RATE:
        RACK_OPTS_INC(tcp_initial_rate);
        val = optval;
        /* Change from kbits per second to bytes per second */
        val *= 1000;
        val /= 8;
        rack->r_ctl.init_rate = val;
        if (rack->rc_init_win != rack_default_init_window) {
            uint32_t win, snt;

            /*
             * Options don't always get applied
             * in the order you think. So in order
             * to assure we update a cwnd we need
             * to check and see if we are still
             * where we should raise the cwnd.
             */
            win = rc_init_window(rack);
            if (SEQ_GT(tp->snd_max, tp->iss))
                snt = tp->snd_max - tp->iss;
            else
                snt = 0;
            if ((snt < win) &&
                (tp->snd_cwnd < win))
                tp->snd_cwnd = win;
        }
        if (rack->rc_always_pace)
            rack_update_seg(rack);
        break;
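    /*
     * Illustrative conversion (not from the original source): an optval
     * of 10000 (10 Mbit/s) becomes 10000 * 1000 / 8 = 1,250,000
     * bytes/sec in init_rate.
     */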
    case TCP_BBR_IWINTSO:
        RACK_OPTS_INC(tcp_initial_win);
        if (optval && (optval <= 0xff)) {
            uint32_t win, snt;

            rack->rc_init_win = optval;
            win = rc_init_window(rack);
            if (SEQ_GT(tp->snd_max, tp->iss))
                snt = tp->snd_max - tp->iss;
            else
                snt = 0;
            if ((snt < win) &&
                (tp->t_srtt |
#ifdef NETFLIX_PEAKRATE
                 tp->t_maxpeakrate |
#endif
                 rack->r_ctl.init_rate)) {
                /*
                 * We are not past the initial window
                 * and we have some bases for pacing,
                 * so we need to possibly adjust up
                 * the cwnd. Note even if we don't set
                 * the cwnd, it's still ok to raise the
                 * rc_init_win, which can be used coming
                 * out of idle when we would have a rate.
                 */
                if (tp->snd_cwnd < win)
                    tp->snd_cwnd = win;
            }
            if (rack->rc_always_pace)
                rack_update_seg(rack);
        } else
            error = EINVAL;
        break;
    case TCP_RACK_FORCE_MSEG:
        RACK_OPTS_INC(tcp_rack_force_max_seg);
        if (optval)
            rack->rc_force_max_seg = 1;
        else
            rack->rc_force_max_seg = 0;
        break;
    case TCP_RACK_PACE_MAX_SEG:
        /* Max segment size in a pace, in bytes */
        RACK_OPTS_INC(tcp_rack_max_seg);
        rack->rc_user_set_max_segs = optval;
        rack_set_pace_segments(tp, rack, __LINE__);
        break;
    case TCP_RACK_PACE_RATE_REC:
        /* Set the fixed pacing rate for recovery, in bytes per second */
        RACK_OPTS_INC(tcp_rack_pace_rate_rec);
        rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
        if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
            rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
        if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
            rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
        rack->use_fixed_rate = 1;
        rack_log_pacing_delay_calc(rack,
            rack->r_ctl.rc_fixed_pacing_rate_ss,
            rack->r_ctl.rc_fixed_pacing_rate_ca,
            rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
            __LINE__, NULL);
        break;
    case TCP_RACK_PACE_RATE_SS:
        /* Set the fixed pacing rate for slow start, in bytes per second */
        RACK_OPTS_INC(tcp_rack_pace_rate_ss);
        rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
        if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
            rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
        if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
            rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
        rack->use_fixed_rate = 1;
        rack_log_pacing_delay_calc(rack,
            rack->r_ctl.rc_fixed_pacing_rate_ss,
            rack->r_ctl.rc_fixed_pacing_rate_ca,
            rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
            __LINE__, NULL);
        break;
    case TCP_RACK_PACE_RATE_CA:
        /* Set the fixed pacing rate for congestion avoidance, in bytes per second */
        RACK_OPTS_INC(tcp_rack_pace_rate_ca);
        rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
        if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
            rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
        if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
            rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
        rack->use_fixed_rate = 1;
        rack_log_pacing_delay_calc(rack,
            rack->r_ctl.rc_fixed_pacing_rate_ss,
            rack->r_ctl.rc_fixed_pacing_rate_ca,
            rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
            __LINE__, NULL);
        break;
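    /*
     * Note (not from the original source): each of the three fixed-rate
     * cases above also seeds the other two rates when they are still
     * zero, so setting a single rate pins all phases until they are set
     * individually.
     */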
    case TCP_RACK_GP_INCREASE_REC:
        RACK_OPTS_INC(tcp_gp_inc_rec);
        rack->r_ctl.rack_per_of_gp_rec = optval;
        rack_log_pacing_delay_calc(rack,
            rack->r_ctl.rack_per_of_gp_ss,
            rack->r_ctl.rack_per_of_gp_ca,
            rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
            __LINE__, NULL);
        break;
    case TCP_RACK_GP_INCREASE_CA:
        RACK_OPTS_INC(tcp_gp_inc_ca);
        ca = optval;
        if (ca < 100) {
            /*
             * We don't allow any reduction
             * over the GP b/w.
             */
            error = EINVAL;
            break;
        }
        rack->r_ctl.rack_per_of_gp_ca = ca;
        rack_log_pacing_delay_calc(rack,
            rack->r_ctl.rack_per_of_gp_ss,
            rack->r_ctl.rack_per_of_gp_ca,
            rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
            __LINE__, NULL);
        break;
    case TCP_RACK_GP_INCREASE_SS:
        RACK_OPTS_INC(tcp_gp_inc_ss);
        ss = optval;
        if (ss < 100) {
            /*
             * We don't allow any reduction
             * over the GP b/w.
             */
            error = EINVAL;
            break;
        }
        rack->r_ctl.rack_per_of_gp_ss = ss;
        rack_log_pacing_delay_calc(rack,
            rack->r_ctl.rack_per_of_gp_ss,
            rack->r_ctl.rack_per_of_gp_ca,
            rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
            __LINE__, NULL);
        break;
    case TCP_RACK_RR_CONF:
        RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate);
        if (optval && optval <= 3)
            rack->r_rr_config = optval;
        else
            rack->r_rr_config = 0;
        break;
    case TCP_BBR_HDWR_PACE:
        RACK_OPTS_INC(tcp_hdwr_pacing);
        if (optval) {
            if (rack->rack_hdrw_pacing == 0) {
                rack->rack_hdw_pace_ena = 1;
                rack->rack_attempt_hdwr_pace = 0;
            } else
                error = EALREADY;
        } else {
            rack->rack_hdw_pace_ena = 0;
#ifdef RATELIMIT
            if (rack->rack_hdrw_pacing) {
                rack->rack_hdrw_pacing = 0;
                in_pcbdetach_txrtlmt(rack->rc_inp);
            }
#endif
        }
        break;
    /* End Pacing related ones */
    case TCP_RACK_PRR_SENDALOT:
        /* Allow PRR to send more than one seg */
        RACK_OPTS_INC(tcp_rack_prr_sendalot);
        rack->r_ctl.rc_prr_sendalot = optval;
        break;
    case TCP_RACK_MIN_TO:
        /* Minimum time between rack t-o's in ms */
        RACK_OPTS_INC(tcp_rack_min_to);
        rack->r_ctl.rc_min_to = optval;
        break;
    case TCP_RACK_EARLY_SEG:
        /* If early recovery max segments */
        RACK_OPTS_INC(tcp_rack_early_seg);
        rack->r_ctl.rc_early_recovery_segs = optval;
        break;
    case TCP_RACK_REORD_THRESH:
        /* RACK reorder threshold (shift amount) */
        RACK_OPTS_INC(tcp_rack_reord_thresh);
        if ((optval > 0) && (optval < 31))
            rack->r_ctl.rc_reorder_shift = optval;
        else
            error = EINVAL;
        break;
    case TCP_RACK_REORD_FADE:
        /* Does reordering fade after ms time */
        RACK_OPTS_INC(tcp_rack_reord_fade);
        rack->r_ctl.rc_reorder_fade = optval;
        break;
    case TCP_RACK_TLP_THRESH:
        /* RACK TLP threshold i.e. srtt+(srtt/N) */
        RACK_OPTS_INC(tcp_rack_tlp_thresh);
        if (optval)
            rack->r_ctl.rc_tlp_threshold = optval;
        else
            error = EINVAL;
        break;
    case TCP_BBR_USE_RACK_RR:
        RACK_OPTS_INC(tcp_rack_rr);
        if (optval)
            rack->use_rack_rr = 1;
        else
            rack->use_rack_rr = 0;
        break;
    case TCP_RACK_PKT_DELAY:
        /* RACK added ms i.e. rack-rtt + reord + N */
        RACK_OPTS_INC(tcp_rack_pkt_delay);
        rack->r_ctl.rc_pkt_delay = optval;
        break;
    case TCP_RACK_TLP_INC_VAR:
        /* Does TLP include rtt variance in t-o */
        error = EINVAL;
        break;
    case TCP_RACK_IDLE_REDUCE_HIGH:
        error = EINVAL;
        break;
    case TCP_DELACK:
        if (optval == 0)
            tp->t_delayed_ack = 0;
        else
            tp->t_delayed_ack = 1;
        if (tp->t_flags & TF_DELACK) {
            tp->t_flags &= ~TF_DELACK;
            tp->t_flags |= TF_ACKNOW;
            NET_EPOCH_ENTER(et);
            rack_output(tp);
            NET_EPOCH_EXIT(et);
        }
        break;
    case TCP_BBR_RACK_RTT_USE:
        if ((optval != USE_RTT_HIGH) &&
            (optval != USE_RTT_LOW) &&
            (optval != USE_RTT_AVG))
            error = EINVAL;
        else
            rack->r_ctl.rc_rate_sample_method = optval;
        break;
    case TCP_DATA_AFTER_CLOSE:
        if (optval)
            rack->rc_allow_data_af_clo = 1;
        else
            rack->rc_allow_data_af_clo = 0;
        break;
    case TCP_RACK_PACE_REDUCE:
        /* sysctl only now */
        error = EINVAL;
        break;
    default:
        return (tcp_default_ctloutput(so, sopt, inp, tp));
    }
#ifdef NETFLIX_STATS
    tcp_log_socket_option(tp, sopt->sopt_name, optval, error);
#endif
    INP_WUNLOCK(inp);
    return (error);
}
static int
rack_get_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
{
    int32_t error, optval;
    uint64_t val;

    /*
     * Because all our options are either boolean or an int, we can just
     * pull everything into optval and then unlock and copy. If we ever
     * add an option that is not an int, then this will have quite an
     * impact on this routine.
     */
    error = 0;
    switch (sopt->sopt_name) {
    case TCP_RACK_PROFILE:
        /* You cannot retrieve a profile, it's write only */
        error = EINVAL;
        break;
    case TCP_RACK_PACE_TO_FILL:
        optval = rack->rc_pace_to_cwnd;
        break;
    case TCP_RACK_NO_PUSH_AT_MAX:
        optval = rack->r_ctl.rc_no_push_at_mrtt;
        break;
    case TCP_SHARED_CWND_ENABLE:
        optval = rack->rack_enable_scwnd;
        break;
    case TCP_RACK_NONRXT_CFG_RATE:
        optval = rack->rack_rec_nonrxt_use_cr;
        break;
    case TCP_NO_PRR:
        optval = rack->rack_no_prr;
        break;
    case TCP_RACK_DO_DETECTION:
        optval = rack->do_detection;
        break;
    case TCP_RACK_MBUF_QUEUE:
        /* Now do we use the LRO mbuf-queue feature */
        optval = rack->r_mbuf_queue;
        break;
    case TCP_TIMELY_DYN_ADJ:
        optval = rack->rc_gp_dyn_mul;
        break;
    case TCP_BBR_IWINTSO:
        optval = rack->rc_init_win;
        break;
    case TCP_RACK_PROP_RATE:
        optval = rack->r_ctl.rc_prop_rate;
        break;
    case TCP_RACK_PROP:
        /* RACK proportional rate reduction (bool) */
        optval = rack->r_ctl.rc_prop_reduce;
        break;
    case TCP_RACK_TLP_REDUCE:
        /* RACK TLP cwnd reduction (bool) */
        optval = rack->r_ctl.rc_tlp_cwnd_reduce;
        break;
    case TCP_RACK_EARLY_RECOV:
        /* Should recovery happen early (bool) */
        optval = rack->r_ctl.rc_early_recovery;
        break;
    case TCP_RACK_PACE_REDUCE:
        /* RACK Hptsi reduction factor (divisor) */
        error = EINVAL;
        break;
    case TCP_BBR_RACK_INIT_RATE:
        val = rack->r_ctl.init_rate;
        /* convert to kbits per sec */
        val *= 8;
        val /= 1000;
        optval = (uint32_t)val;
        break;
    case TCP_RACK_FORCE_MSEG:
        optval = rack->rc_force_max_seg;
        break;
    case TCP_RACK_PACE_MAX_SEG:
        /* Max segments in a pace */
        optval = rack->rc_user_set_max_segs;
        break;
    case TCP_RACK_PACE_ALWAYS:
        /* Use the always pace method */
        optval = rack->rc_always_pace;
        break;
    case TCP_RACK_PRR_SENDALOT:
        /* Allow PRR to send more than one seg */
        optval = rack->r_ctl.rc_prr_sendalot;
        break;
    case TCP_RACK_MIN_TO:
        /* Minimum time between rack t-o's in ms */
        optval = rack->r_ctl.rc_min_to;
        break;
    case TCP_RACK_EARLY_SEG:
        /* If early recovery max segments */
        optval = rack->r_ctl.rc_early_recovery_segs;
        break;
    case TCP_RACK_REORD_THRESH:
        /* RACK reorder threshold (shift amount) */
        optval = rack->r_ctl.rc_reorder_shift;
        break;
    case TCP_RACK_REORD_FADE:
        /* Does reordering fade after ms time */
        optval = rack->r_ctl.rc_reorder_fade;
        break;
    case TCP_BBR_USE_RACK_RR:
        /* Do we use the rack cheat for rxt */
        optval = rack->use_rack_rr;
        break;
    case TCP_RACK_RR_CONF:
        optval = rack->r_rr_config;
        break;
    case TCP_BBR_HDWR_PACE:
        optval = rack->rack_hdw_pace_ena;
        break;
    case TCP_RACK_TLP_THRESH:
        /* RACK TLP threshold i.e. srtt+(srtt/N) */
        optval = rack->r_ctl.rc_tlp_threshold;
        break;
    case TCP_RACK_PKT_DELAY:
        /* RACK added ms i.e. rack-rtt + reord + N */
        optval = rack->r_ctl.rc_pkt_delay;
        break;
    case TCP_RACK_TLP_USE:
        optval = rack->rack_tlp_threshold_use;
        break;
    case TCP_RACK_TLP_INC_VAR:
        /* Does TLP include rtt variance in t-o */
        error = EINVAL;
        break;
    case TCP_RACK_IDLE_REDUCE_HIGH:
        error = EINVAL;
        break;
    case TCP_RACK_PACE_RATE_CA:
        optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
        break;
    case TCP_RACK_PACE_RATE_SS:
        optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
        break;
    case TCP_RACK_PACE_RATE_REC:
        optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
        break;
    case TCP_RACK_GP_INCREASE_SS:
        optval = rack->r_ctl.rack_per_of_gp_ss;
        break;
    case TCP_RACK_GP_INCREASE_CA:
        optval = rack->r_ctl.rack_per_of_gp_ca;
        break;
    case TCP_BBR_RACK_RTT_USE:
        optval = rack->r_ctl.rc_rate_sample_method;
        break;
    case TCP_DELACK:
        optval = tp->t_delayed_ack;
        break;
    case TCP_DATA_AFTER_CLOSE:
        optval = rack->rc_allow_data_af_clo;
        break;
    case TCP_SHARED_CWND_TIME_LIMIT:
        optval = rack->r_limit_scw;
        break;
    default:
        return (tcp_default_ctloutput(so, sopt, inp, tp));
    }
    INP_WUNLOCK(inp);
    if (error == 0) {
        error = sooptcopyout(sopt, &optval, sizeof optval);
    }
    return (error);
}
static int
rack_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp)
{
    int32_t error = EINVAL;
    struct tcp_rack *rack;

    rack = (struct tcp_rack *)tp->t_fb_ptr;
    if (rack == NULL) {
        /* Huh? */
        goto out;
    }
    if (sopt->sopt_dir == SOPT_SET) {
        return (rack_set_sockopt(so, sopt, inp, tp, rack));
    } else if (sopt->sopt_dir == SOPT_GET) {
        return (rack_get_sockopt(so, sopt, inp, tp, rack));
    }
out:
    INP_WUNLOCK(inp);
    return (error);
}
static int
rack_pru_options(struct tcpcb *tp, int flags)
{
    if (flags & PRUS_OOB)
        return (EOPNOTSUPP);
    return (0);
}
static struct tcp_function_block __tcp_rack = {
    .tfb_tcp_block_name = __XSTRING(STACKNAME),
    .tfb_tcp_output = rack_output,
    .tfb_do_queued_segments = ctf_do_queued_segments,
    .tfb_do_segment_nounlock = rack_do_segment_nounlock,
    .tfb_tcp_do_segment = rack_do_segment,
    .tfb_tcp_ctloutput = rack_ctloutput,
    .tfb_tcp_fb_init = rack_init,
    .tfb_tcp_fb_fini = rack_fini,
    .tfb_tcp_timer_stop_all = rack_stopall,
    .tfb_tcp_timer_activate = rack_timer_activate,
    .tfb_tcp_timer_active = rack_timer_active,
    .tfb_tcp_timer_stop = rack_timer_stop,
    .tfb_tcp_rexmit_tmr = rack_remxt_tmr,
    .tfb_tcp_handoff_ok = rack_handoff_ok,
    .tfb_pru_options = rack_pru_options,
};
static const char *rack_stack_names[] = {
    __XSTRING(STACKNAME),
#ifdef STACKALIAS
    __XSTRING(STACKALIAS),
#endif
};

static int
rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
{
    memset(mem, 0, size);
    return (0);
}

static void
rack_dtor(void *mem, int32_t size, void *arg)
{

}
static bool rack_mod_inited = false;

static int
tcp_addrack(module_t mod, int32_t type, void *data)
{
    int32_t err = 0;
    int num_stacks;

    switch (type) {
    case MOD_LOAD:
        rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
            sizeof(struct rack_sendmap),
            rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

        rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
            sizeof(struct tcp_rack),
            rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

        sysctl_ctx_init(&rack_sysctl_ctx);
        rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
            SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
            OID_AUTO,
#ifdef STACKALIAS
            __XSTRING(STACKALIAS),
#else
            __XSTRING(STACKNAME),
#endif
            CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
            "");
        if (rack_sysctl_root == NULL) {
            printf("Failed to add sysctl node\n");
            err = EFAULT;
            goto free_uma;
        }
        rack_init_sysctls();
        num_stacks = nitems(rack_stack_names);
        err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
            rack_stack_names, &num_stacks);
        if (err) {
            printf("Failed to register %s stack name for "
                "%s module\n", rack_stack_names[num_stacks],
                __XSTRING(MODNAME));
            sysctl_ctx_free(&rack_sysctl_ctx);
free_uma:
            uma_zdestroy(rack_zone);
            uma_zdestroy(rack_pcb_zone);
            rack_counter_destroy();
            printf("Failed to register rack module -- err:%d\n", err);
            return (err);
        }
        tcp_lro_reg_mbufq();
        rack_mod_inited = true;
        break;
    case MOD_QUIESCE:
        err = deregister_tcp_functions(&__tcp_rack, true, false);
        break;
    case MOD_UNLOAD:
        err = deregister_tcp_functions(&__tcp_rack, false, true);
        if (err == EBUSY)
            break;
        if (rack_mod_inited) {
            uma_zdestroy(rack_zone);
            uma_zdestroy(rack_pcb_zone);
            sysctl_ctx_free(&rack_sysctl_ctx);
            rack_counter_destroy();
            rack_mod_inited = false;
        }
        tcp_lro_dereg_mbufq();
        err = 0;
        break;
    default:
        return (EOPNOTSUPP);
    }
    return (err);
}
static moduledata_t tcp_rack = {
    .name = __XSTRING(MODNAME),
    .evhand = tcp_addrack,
};

MODULE_VERSION(MODNAME, 1);
DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
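/*
 * Illustrative usage (not from the original source): once this module
 * is loaded it registers under its stack name, so from userland one can
 * typically do:
 *
 *	kldload tcp_rack
 *	sysctl net.inet.tcp.functions_available
 *	sysctl net.inet.tcp.functions_default=rack
 *
 * or select the stack per socket with the TCP_FUNCTION_BLK socket
 * option.
 */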