2 * Copyright (c) 2016-2020 Netflix, Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
31 #include "opt_inet6.h"
32 #include "opt_ipsec.h"
33 #include "opt_tcpdebug.h"
34 #include "opt_ratelimit.h"
35 #include "opt_kern_tls.h"
36 #include <sys/param.h>
38 #include <sys/module.h>
39 #include <sys/kernel.h>
41 #include <sys/hhook.h>
44 #include <sys/malloc.h>
46 #include <sys/mutex.h>
48 #include <sys/proc.h> /* for proc0 declaration */
49 #include <sys/socket.h>
50 #include <sys/socketvar.h>
51 #include <sys/sysctl.h>
52 #include <sys/systm.h>
54 #include <sys/qmath.h>
56 #include <sys/stats.h> /* Must come after qmath.h and tree.h */
60 #include <sys/refcount.h>
61 #include <sys/queue.h>
62 #include <sys/tim_filter.h>
64 #include <sys/kthread.h>
65 #include <sys/kern_prefetch.h>
66 #include <sys/protosw.h>
68 #include <sys/sched.h>
69 #include <machine/cpu.h>
73 #include <net/route.h>
74 #include <net/route/nhop.h>
77 #define TCPSTATES /* for logging */
79 #include <netinet/in.h>
80 #include <netinet/in_kdtrace.h>
81 #include <netinet/in_pcb.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
84 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
85 #include <netinet/ip_var.h>
86 #include <netinet/ip6.h>
87 #include <netinet6/in6_pcb.h>
88 #include <netinet6/ip6_var.h>
89 #include <netinet/tcp.h>
91 #include <netinet/tcp_fsm.h>
92 #include <netinet/tcp_log_buf.h>
93 #include <netinet/tcp_seq.h>
94 #include <netinet/tcp_timer.h>
95 #include <netinet/tcp_var.h>
96 #include <netinet/tcp_syncache.h>
97 #include <netinet/tcp_hpts.h>
98 #include <netinet/tcp_ratelimit.h>
99 #include <netinet/tcp_accounting.h>
100 #include <netinet/tcpip.h>
101 #include <netinet/cc/cc.h>
102 #include <netinet/cc/cc_newreno.h>
103 #include <netinet/tcp_fastopen.h>
104 #include <netinet/tcp_lro.h>
105 #ifdef NETFLIX_SHARED_CWND
106 #include <netinet/tcp_shared_cwnd.h>
109 #include <netinet/tcp_debug.h>
110 #endif /* TCPDEBUG */
112 #include <netinet/tcp_offload.h>
115 #include <netinet6/tcp6_var.h>
117 #include <netinet/tcp_ecn.h>
119 #include <netipsec/ipsec_support.h>
121 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
122 #include <netipsec/ipsec.h>
123 #include <netipsec/ipsec6.h>
126 #include <netinet/udp.h>
127 #include <netinet/udp_var.h>
128 #include <machine/in_cksum.h>
131 #include <security/mac/mac_framework.h>
133 #include "sack_filter.h"
134 #include "tcp_rack.h"
135 #include "rack_bbr_common.h"
137 uma_zone_t rack_zone;
138 uma_zone_t rack_pcb_zone;
141 #define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t)))
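/*
 * New Reno's per-vnet beta values; rack_set_cc_pacing() uses these as
 * defaults when saving off and overriding the CC module's beta settings.
 */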
144 VNET_DECLARE(uint32_t, newreno_beta);
145 VNET_DECLARE(uint32_t, newreno_beta_ecn);
146 #define V_newreno_beta VNET(newreno_beta)
147 #define V_newreno_beta_ecn VNET(newreno_beta_ecn)
150 MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block");
151 MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options");
153 struct sysctl_ctx_list rack_sysctl_ctx;
154 struct sysctl_oid *rack_sysctl_root;
160 * The RACK module incorporates a number of
161 * TCP ideas that have been put out into the IETF
162 * over the last few years:
163 * - Matt Mathis's Rate Halving which slowly drops
164 * the congestion window so that the ack clock can
165 * be maintained during a recovery.
166 * - Yuchung Cheng's RACK TCP (for which it is named) that
167 * will stop us using the number of dup acks and instead
168 * use time as the gauge of when we retransmit.
169 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
170 * of Dukkipati et al.
171 * RACK depends on SACK, so if an endpoint arrives that
172 * cannot do SACK the state machine below will shuttle the
173 * connection back to using the "default" TCP stack that is
176 * To implement RACK the original TCP stack was first decomposed
177 * into a functional state machine with individual states
178 * for each of the possible TCP connection states. The do_segment
179 * function's role in life is to mandate that the connection supports SACK
180 * initially and then assure that the RACK state matches the connection
181 * state before calling the states do_segment function. Each
182 * state is simplified due to the fact that the original do_segment
183 * has been decomposed and we *know* what state we are in (no
184 * switches on the state) and all tests for SACK are gone. This
185 * greatly simplifies what each state does.
187 * TCP output is also over-written with a new version since it
188 * must maintain the new rack scoreboard.
191 static int32_t rack_tlp_thresh = 1;
192 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */
193 static int32_t rack_tlp_use_greater = 1;
194 static int32_t rack_reorder_thresh = 2;
195 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000 usecs (60 seconds) */
197 static uint8_t rack_req_measurements = 1;
198 /* Attack threshold detections */
199 static uint32_t rack_highest_sack_thresh_seen = 0;
200 static uint32_t rack_highest_move_thresh_seen = 0;
201 static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */
202 static int32_t rack_hw_pace_extra_slots = 2; /* 2 extra MSS time betweens */
203 static int32_t rack_hw_rate_caps = 1; /* 1; */
204 static int32_t rack_hw_rate_min = 0; /* 1500000;*/
205 static int32_t rack_hw_rate_to_low = 0; /* 1200000; */
206 static int32_t rack_hw_up_only = 1;
207 static int32_t rack_stats_gets_ms_rtt = 1;
208 static int32_t rack_prr_addbackmax = 2;
209 static int32_t rack_do_hystart = 0;
210 static int32_t rack_apply_rtt_with_reduced_conf = 0;
212 static int32_t rack_pkt_delay = 1000;
213 static int32_t rack_send_a_lot_in_prr = 1;
214 static int32_t rack_min_to = 1000; /* Number of microsecond min timeout */
215 static int32_t rack_verbose_logging = 0;
216 static int32_t rack_ignore_data_after_close = 1;
217 static int32_t rack_enable_shared_cwnd = 1;
218 static int32_t rack_use_cmp_acks = 1;
219 static int32_t rack_use_fsb = 1;
220 static int32_t rack_use_rfo = 1;
221 static int32_t rack_use_rsm_rfo = 1;
222 static int32_t rack_max_abc_post_recovery = 2;
223 static int32_t rack_client_low_buf = 0;
224 static int32_t rack_dsack_std_based = 0x3; /* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */
225 #ifdef TCP_ACCOUNTING
226 static int32_t rack_tcp_accounting = 0;
228 static int32_t rack_limits_scwnd = 1;
229 static int32_t rack_enable_mqueue_for_nonpaced = 0;
230 static int32_t rack_disable_prr = 0;
231 static int32_t use_rack_rr = 1;
232 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */
233 static int32_t rack_persist_min = 250000; /* 250usec */
234 static int32_t rack_persist_max = 2000000; /* 2 Second in usec's */
235 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */
236 static int32_t rack_default_init_window = 0; /* Use system default */
237 static int32_t rack_limit_time_with_srtt = 0;
238 static int32_t rack_autosndbuf_inc = 20; /* In percentage form */
239 static int32_t rack_enobuf_hw_boost_mult = 2; /* How many times the hw rate we boost slot using time_between */
240 static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */
241 static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */
242 static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */
245 * Currently regular tcp has a rto_min of 30ms
246 * the backoff goes 12 times so that ends up
247 * being a total of 122.850 seconds before a
248 * connection is killed.
250 static uint32_t rack_def_data_window = 20;
251 static uint32_t rack_goal_bdp = 2;
252 static uint32_t rack_min_srtts = 1;
253 static uint32_t rack_min_measure_usec = 0;
254 static int32_t rack_tlp_min = 10000; /* 10ms */
255 static int32_t rack_rto_min = 30000; /* 30,000 usec same as main freebsd */
256 static int32_t rack_rto_max = 4000000; /* 4 seconds in usec's */
257 static const int32_t rack_free_cache = 2;
258 static int32_t rack_hptsi_segments = 40;
259 static int32_t rack_rate_sample_method = USE_RTT_LOW;
260 static int32_t rack_pace_every_seg = 0;
261 static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */
262 static int32_t rack_slot_reduction = 4;
263 static int32_t rack_wma_divisor = 8; /* For WMA calculation */
264 static int32_t rack_cwnd_block_ends_measure = 0;
265 static int32_t rack_rwnd_block_ends_measure = 0;
266 static int32_t rack_def_profile = 0;
268 static int32_t rack_lower_cwnd_at_tlp = 0;
269 static int32_t rack_limited_retran = 0;
270 static int32_t rack_always_send_oldest = 0;
271 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;
273 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */
274 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */
275 static uint16_t rack_per_of_gp_rec = 200; /* 200 % of bw */
278 static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */
279 static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */
280 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */
281 static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */
282 static uint16_t rack_atexit_prtt = 130; /* Clamp to 130% on exit prtt if non highly buffered path */
284 static uint32_t rack_max_drain_wait = 2; /* How many gp srtt's before we give up draining */
285 static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */
286 static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */
287 static uint32_t rack_probertt_use_min_rtt_exit = 0;
288 static uint32_t rack_probe_rtt_sets_cwnd = 0;
289 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */
290 static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in usecs */
291 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top fraction */
292 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bottom fraction */
293 static uint32_t rack_min_probertt_hold = 40000; /* Equal to delayed ack time */
294 static uint32_t rack_probertt_filter_life = 10000000;
295 static uint32_t rack_probertt_lower_within = 10;
296 static uint32_t rack_min_rtt_movement = 250000; /* Must move at least 250ms (in microseconds) to count as a lowering */
297 static int32_t rack_pace_one_seg = 0; /* Shall we pace for less than 1.4Meg 1MSS at a time */
298 static int32_t rack_probertt_clear_is = 1;
299 static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */
300 static int32_t rack_hbp_thresh = 3; /* what is the divisor max_rtt/min_rtt to decide a hbp */
303 static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */
305 /* Timely information */
306 /* Combine these two gives the range of 'no change' to bw */
307 /* ie the up/down provide the upper and lower bound */
308 static int32_t rack_gp_per_bw_mul_up = 2; /* 2% */
309 static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */
310 static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */
311 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */
312 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */
313 static int32_t rack_gp_decrease_per = 20; /* 20% decrease in multiplier */
314 static int32_t rack_gp_increase_per = 2; /* 2% increase in multiplier */
315 static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */
316 static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */
317 static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */
318 static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */
319 static int32_t rack_gp_no_rec_chg = 1; /* Prohibit recovery from reducing its multiplier */
320 static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? */
321 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */
322 static int32_t rack_timely_max_push_drop = 3; /* Three rounds of pushing */
323 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */
324 static int32_t rack_use_max_for_nobackoff = 0;
325 static int32_t rack_timely_int_timely_only = 0; /* do interim timely's only use the timely algo (no b/w changes)? */
326 static int32_t rack_timely_no_stopping = 0;
327 static int32_t rack_down_raise_thresh = 100;
328 static int32_t rack_req_segs = 1;
329 static uint64_t rack_bw_rate_cap = 0;
330 static uint32_t rack_trace_point_config = 0;
331 static uint32_t rack_trace_point_bb_mode = 4;
332 static int32_t rack_trace_point_count = 0;
335 /* Weird delayed ack mode */
336 static int32_t rack_use_imac_dack = 0;
337 /* Rack specific counters */
338 counter_u64_t rack_saw_enobuf;
339 counter_u64_t rack_saw_enobuf_hw;
340 counter_u64_t rack_saw_enetunreach;
341 counter_u64_t rack_persists_sends;
342 counter_u64_t rack_persists_acks;
343 counter_u64_t rack_persists_loss;
344 counter_u64_t rack_persists_lost_ends;
346 counter_u64_t rack_adjust_map_bw;
348 /* Tail loss probe counters */
349 counter_u64_t rack_tlp_tot;
350 counter_u64_t rack_tlp_newdata;
351 counter_u64_t rack_tlp_retran;
352 counter_u64_t rack_tlp_retran_bytes;
353 counter_u64_t rack_to_tot;
354 counter_u64_t rack_hot_alloc;
355 counter_u64_t rack_to_alloc;
356 counter_u64_t rack_to_alloc_hard;
357 counter_u64_t rack_to_alloc_emerg;
358 counter_u64_t rack_to_alloc_limited;
359 counter_u64_t rack_alloc_limited_conns;
360 counter_u64_t rack_split_limited;
362 counter_u64_t rack_multi_single_eq;
363 counter_u64_t rack_proc_non_comp_ack;
365 counter_u64_t rack_fto_send;
366 counter_u64_t rack_fto_rsm_send;
367 counter_u64_t rack_nfto_resend;
368 counter_u64_t rack_non_fto_send;
369 counter_u64_t rack_extended_rfo;
371 counter_u64_t rack_sack_proc_all;
372 counter_u64_t rack_sack_proc_short;
373 counter_u64_t rack_sack_proc_restart;
374 counter_u64_t rack_sack_attacks_detected;
375 counter_u64_t rack_sack_attacks_reversed;
376 counter_u64_t rack_sack_used_next_merge;
377 counter_u64_t rack_sack_splits;
378 counter_u64_t rack_sack_used_prev_merge;
379 counter_u64_t rack_sack_skipped_acked;
380 counter_u64_t rack_ack_total;
381 counter_u64_t rack_express_sack;
382 counter_u64_t rack_sack_total;
383 counter_u64_t rack_move_none;
384 counter_u64_t rack_move_some;
386 counter_u64_t rack_input_idle_reduces;
387 counter_u64_t rack_collapsed_win;
388 counter_u64_t rack_collapsed_win_seen;
389 counter_u64_t rack_collapsed_win_rxt;
390 counter_u64_t rack_collapsed_win_rxt_bytes;
391 counter_u64_t rack_try_scwnd;
392 counter_u64_t rack_hw_pace_init_fail;
393 counter_u64_t rack_hw_pace_lost;
395 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
396 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];
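/*
 * Retransmit timeout: the larger of rack_rto_min and srtt plus four times
 * the rtt variance (the << 2 below). For example, a 40000 usec srtt with
 * a 5000 usec rttvar would give 60000 usec.
 */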
399 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))
401 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \
402 (tv) = (value) + slop; \
403 if ((u_long)(tv) < (u_long)(tvmin)) \
405 if ((u_long)(tv) > (u_long)(tvmax)) \
410 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);
413 rack_process_ack(struct mbuf *m, struct tcphdr *th,
414 struct socket *so, struct tcpcb *tp, struct tcpopt *to,
415 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val);
417 rack_process_data(struct mbuf *m, struct tcphdr *th,
418 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
419 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
421 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
422 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery);
423 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
424 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
426 static struct rack_sendmap *
427 rack_check_recovery_mode(struct tcpcb *tp,
430 rack_cong_signal(struct tcpcb *tp,
431 uint32_t type, uint32_t ack, int );
432 static void rack_counter_destroy(void);
434 rack_ctloutput(struct inpcb *inp, struct sockopt *sopt);
435 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
437 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override);
439 rack_do_segment(struct mbuf *m, struct tcphdr *th,
440 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
442 static void rack_dtor(void *mem, int32_t size, void *arg);
444 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
445 uint32_t flex1, uint32_t flex2,
446 uint32_t flex3, uint32_t flex4,
447 uint32_t flex5, uint32_t flex6,
448 uint16_t flex7, uint8_t mod);
451 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
452 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line,
453 struct rack_sendmap *rsm, uint8_t quality);
454 static struct rack_sendmap *
455 rack_find_high_nonack(struct tcp_rack *rack,
456 struct rack_sendmap *rsm);
457 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
458 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
459 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
460 static int rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt);
462 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
463 tcp_seq th_ack, int line, uint8_t quality);
465 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
466 static int32_t rack_handoff_ok(struct tcpcb *tp);
467 static int32_t rack_init(struct tcpcb *tp);
468 static void rack_init_sysctls(void);
470 rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
471 struct tcphdr *th, int entered_rec, int dup_ack_struck);
473 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
474 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts,
475 struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls);
478 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
479 struct rack_sendmap *rsm);
480 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
481 static int32_t rack_output(struct tcpcb *tp);
484 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
485 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
486 uint32_t cts, int *moved_two);
487 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq);
488 static void rack_remxt_tmr(struct tcpcb *tp);
489 static int rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt);
490 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
491 static int32_t rack_stopall(struct tcpcb *tp);
493 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type,
495 static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type);
496 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
497 static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type);
499 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
500 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint16_t add_flag);
502 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
503 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag);
505 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
506 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
507 static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
509 rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
510 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
511 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
513 rack_do_closing(struct mbuf *m, struct tcphdr *th,
514 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
515 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
517 rack_do_established(struct mbuf *m, struct tcphdr *th,
518 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
519 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
521 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
522 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
523 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos);
525 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
526 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
527 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
529 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
530 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
531 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
533 rack_do_lastack(struct mbuf *m, struct tcphdr *th,
534 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
535 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
537 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
538 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
539 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
541 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
542 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
543 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
544 struct rack_sendmap *
545 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
547 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
548 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
550 tcp_rack_partialack(struct tcpcb *tp);
552 rack_set_profile(struct tcp_rack *rack, int prof);
554 rack_apply_deferred_options(struct tcp_rack *rack);
556 int32_t rack_clear_counter=0;
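/*
 * Check whether trace point "num" is armed: if the configured trace point
 * matches (or is set to 0xffffffff meaning "all"), a BB logging mode is
 * configured, there is count budget left and the connection is not already
 * logging, turn black box logging on for this connection.
 */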
559 rack_trace_point(struct tcp_rack *rack, int num)
561 if (((rack_trace_point_config == num) ||
562 (rack_trace_point_config == 0xffffffff)) &&
563 (rack_trace_point_bb_mode != 0) &&
564 (rack_trace_point_count > 0) &&
565 (rack->rc_tp->t_logstate == 0)) {
567 res = atomic_fetchadd_int(&rack_trace_point_count, -1);
569 rack->rc_tp->t_logstate = rack_trace_point_bb_mode;
571 /* Lost a race, assure it's zero now */
572 rack_trace_point_count = 0;
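/*
 * Install RACK's saved beta/beta_ecn values into New Reno via the CC
 * module's ctl_output interface, stashing the previous values in
 * r_ctl.rc_saved_beta so rack_undo_cc_pacing() can restore them. Only
 * done when the connection's CC algorithm is New Reno.
 */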
578 rack_set_cc_pacing(struct tcp_rack *rack)
581 struct cc_newreno_opts opt;
582 struct newreno old, *ptr;
586 if (rack->rc_pacing_cc_set)
590 if (tp->cc_algo == NULL) {
594 rack->rc_pacing_cc_set = 1;
595 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
596 /* Not new-reno we can't play games with beta! */
599 ptr = ((struct newreno *)tp->ccv->cc_data);
600 if (CC_ALGO(tp)->ctl_output == NULL) {
601 /* Huh, why does new_reno no longer have a set function? */
605 /* Just the default values */
606 old.beta = V_newreno_beta_ecn;
607 old.beta_ecn = V_newreno_beta_ecn;
608 old.newreno_flags = 0;
610 old.beta = ptr->beta;
611 old.beta_ecn = ptr->beta_ecn;
612 old.newreno_flags = ptr->newreno_flags;
614 sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
615 sopt.sopt_dir = SOPT_SET;
616 opt.name = CC_NEWRENO_BETA;
617 opt.val = rack->r_ctl.rc_saved_beta.beta;
618 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
623 * Hack alert: we need to set in our newreno_flags
624 * so that ABE behavior is also applied.
626 ((struct newreno *)tp->ccv->cc_data)->newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED;
627 opt.name = CC_NEWRENO_BETA_ECN;
628 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn;
629 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
633 /* Save off the original values for restoral */
634 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
636 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
637 union tcp_log_stackspecific log;
640 ptr = ((struct newreno *)tp->ccv->cc_data);
641 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
642 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
644 log.u_bbr.flex1 = ptr->beta;
645 log.u_bbr.flex2 = ptr->beta_ecn;
646 log.u_bbr.flex3 = ptr->newreno_flags;
648 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
649 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
650 log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags;
651 log.u_bbr.flex7 = rack->gp_ready;
652 log.u_bbr.flex7 <<= 1;
653 log.u_bbr.flex7 |= rack->use_fixed_rate;
654 log.u_bbr.flex7 <<= 1;
655 log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
656 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
658 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, error,
659 0, &log, false, NULL, NULL, 0, &tv);
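/*
 * Reverse rack_set_cc_pacing(): copy the original New Reno values back
 * into the CC module's private data and keep the pacing values in
 * r_ctl.rc_saved_beta for when pacing is turned back on.
 */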
664 rack_undo_cc_pacing(struct tcp_rack *rack)
666 struct newreno old, *ptr;
669 if (rack->rc_pacing_cc_set == 0)
672 rack->rc_pacing_cc_set = 0;
673 if (tp->cc_algo == NULL)
676 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
677 /* Not new-reno nothing to do! */
680 ptr = ((struct newreno *)tp->ccv->cc_data);
683 * This happens at rack_fini() if the
684 * cc module gets freed on us. In that
685 * case we lose our "new" settings but
686 * that's ok, since the tcb is going away anyway.
690 /* Grab out our set values */
691 memcpy(&old, ptr, sizeof(struct newreno));
692 /* Copy back in the original values */
693 memcpy(ptr, &rack->r_ctl.rc_saved_beta, sizeof(struct newreno));
694 /* Now save back the values we had set in (for when pacing is restored) */
695 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
696 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
697 union tcp_log_stackspecific log;
700 ptr = ((struct newreno *)tp->ccv->cc_data);
701 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
702 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
703 log.u_bbr.flex1 = ptr->beta;
704 log.u_bbr.flex2 = ptr->beta_ecn;
705 log.u_bbr.flex3 = ptr->newreno_flags;
706 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
707 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
708 log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags;
709 log.u_bbr.flex7 = rack->gp_ready;
710 log.u_bbr.flex7 <<= 1;
711 log.u_bbr.flex7 |= rack->use_fixed_rate;
712 log.u_bbr.flex7 <<= 1;
713 log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
714 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
716 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
717 0, &log, false, NULL, NULL, 0, &tv);
721 #ifdef NETFLIX_PEAKRATE
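/*
 * Recompute the peak rate threshold: t_maxpeakrate (bytes/sec) times the
 * srtt divided by HPTS_USEC_IN_SEC (so the srtt is taken as microseconds
 * here) gives bytes per RTT, floored at two segments and clamped to
 * UINT32_MAX. E.g. 1 MB/s with a 20000 usec srtt is about 20000 bytes.
 */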
723 rack_update_peakrate_thr(struct tcpcb *tp)
725 /* Keep in mind that t_maxpeakrate is in B/s. */
727 peak = uqmax((tp->t_maxseg * 2),
728 (((uint64_t)tp->t_maxpeakrate * (uint64_t)(tp->t_srtt)) / (uint64_t)HPTS_USEC_IN_SEC));
729 tp->t_peakrate_thr = (uint32_t)uqmin(peak, UINT32_MAX);
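/*
 * Sysctl handler for the counter "clear" knob: a read returns
 * rack_clear_counter, and a validated write zeroes every RACK counter
 * listed below.
 */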
734 sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
739 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
740 if (error || req->newptr == NULL)
743 error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
748 printf("Clearing RACK counters\n");
750 counter_u64_zero(rack_tlp_tot);
751 counter_u64_zero(rack_tlp_newdata);
752 counter_u64_zero(rack_tlp_retran);
753 counter_u64_zero(rack_tlp_retran_bytes);
754 counter_u64_zero(rack_to_tot);
755 counter_u64_zero(rack_saw_enobuf);
756 counter_u64_zero(rack_saw_enobuf_hw);
757 counter_u64_zero(rack_saw_enetunreach);
758 counter_u64_zero(rack_persists_sends);
759 counter_u64_zero(rack_persists_acks);
760 counter_u64_zero(rack_persists_loss);
761 counter_u64_zero(rack_persists_lost_ends);
763 counter_u64_zero(rack_adjust_map_bw);
765 counter_u64_zero(rack_to_alloc_hard);
766 counter_u64_zero(rack_to_alloc_emerg);
767 counter_u64_zero(rack_sack_proc_all);
768 counter_u64_zero(rack_fto_send);
769 counter_u64_zero(rack_fto_rsm_send);
770 counter_u64_zero(rack_extended_rfo);
771 counter_u64_zero(rack_hw_pace_init_fail);
772 counter_u64_zero(rack_hw_pace_lost);
773 counter_u64_zero(rack_non_fto_send);
774 counter_u64_zero(rack_nfto_resend);
775 counter_u64_zero(rack_sack_proc_short);
776 counter_u64_zero(rack_sack_proc_restart);
777 counter_u64_zero(rack_to_alloc);
778 counter_u64_zero(rack_to_alloc_limited);
779 counter_u64_zero(rack_alloc_limited_conns);
780 counter_u64_zero(rack_split_limited);
781 counter_u64_zero(rack_multi_single_eq);
782 counter_u64_zero(rack_proc_non_comp_ack);
783 counter_u64_zero(rack_sack_attacks_detected);
784 counter_u64_zero(rack_sack_attacks_reversed);
785 counter_u64_zero(rack_sack_used_next_merge);
786 counter_u64_zero(rack_sack_used_prev_merge);
787 counter_u64_zero(rack_sack_splits);
788 counter_u64_zero(rack_sack_skipped_acked);
789 counter_u64_zero(rack_ack_total);
790 counter_u64_zero(rack_express_sack);
791 counter_u64_zero(rack_sack_total);
792 counter_u64_zero(rack_move_none);
793 counter_u64_zero(rack_move_some);
794 counter_u64_zero(rack_try_scwnd);
795 counter_u64_zero(rack_collapsed_win);
796 counter_u64_zero(rack_collapsed_win_rxt);
797 counter_u64_zero(rack_collapsed_win_seen);
798 counter_u64_zero(rack_collapsed_win_rxt_bytes);
800 rack_clear_counter = 0;
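/*
 * Build the RACK sysctl tree under rack_sysctl_root: sub-nodes for sack
 * attack detection, probertt, tracepoints, pacing, hardware pacing,
 * timely, TLP/rack, timers, measurements, features and misc controls,
 * plus the exported counters allocated below.
 */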
805 rack_init_sysctls(void)
807 struct sysctl_oid *rack_counters;
808 struct sysctl_oid *rack_attack;
809 struct sysctl_oid *rack_pacing;
810 struct sysctl_oid *rack_timely;
811 struct sysctl_oid *rack_timers;
812 struct sysctl_oid *rack_tlp;
813 struct sysctl_oid *rack_misc;
814 struct sysctl_oid *rack_features;
815 struct sysctl_oid *rack_measure;
816 struct sysctl_oid *rack_probertt;
817 struct sysctl_oid *rack_hw_pacing;
818 struct sysctl_oid *rack_tracepoint;
820 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
821 SYSCTL_CHILDREN(rack_sysctl_root),
824 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
825 "Rack Sack Attack Counters and Controls");
826 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
827 SYSCTL_CHILDREN(rack_sysctl_root),
830 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
832 SYSCTL_ADD_S32(&rack_sysctl_ctx,
833 SYSCTL_CHILDREN(rack_sysctl_root),
834 OID_AUTO, "rate_sample_method", CTLFLAG_RW,
835 &rack_rate_sample_method, USE_RTT_LOW,
836 "What method should we use for rate sampling 0=high, 1=low");
837 /* Probe rtt related controls */
838 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
839 SYSCTL_CHILDREN(rack_sysctl_root),
842 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
843 "ProbeRTT related Controls");
844 SYSCTL_ADD_U16(&rack_sysctl_ctx,
845 SYSCTL_CHILDREN(rack_probertt),
846 OID_AUTO, "exit_per_hpb", CTLFLAG_RW,
847 &rack_atexit_prtt_hbp, 130,
848 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%");
849 SYSCTL_ADD_U16(&rack_sysctl_ctx,
850 SYSCTL_CHILDREN(rack_probertt),
851 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW,
852 &rack_atexit_prtt, 130,
853 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%");
854 SYSCTL_ADD_U16(&rack_sysctl_ctx,
855 SYSCTL_CHILDREN(rack_probertt),
856 OID_AUTO, "gp_per_mul", CTLFLAG_RW,
857 &rack_per_of_gp_probertt, 60,
858 "What percentage of goodput do we pace at in probertt");
859 SYSCTL_ADD_U16(&rack_sysctl_ctx,
860 SYSCTL_CHILDREN(rack_probertt),
861 OID_AUTO, "gp_per_reduce", CTLFLAG_RW,
862 &rack_per_of_gp_probertt_reduce, 10,
863 "What percentage of goodput do we reduce every gp_srtt");
864 SYSCTL_ADD_U16(&rack_sysctl_ctx,
865 SYSCTL_CHILDREN(rack_probertt),
866 OID_AUTO, "gp_per_low", CTLFLAG_RW,
867 &rack_per_of_gp_lowthresh, 40,
868 "What percentage of goodput do we allow the multiplier to fall to");
869 SYSCTL_ADD_U32(&rack_sysctl_ctx,
870 SYSCTL_CHILDREN(rack_probertt),
871 OID_AUTO, "time_between", CTLFLAG_RW,
872 &rack_time_between_probertt, 96000000,
873 "How many useconds must pass after the lowest rtt falls before we enter probertt");
874 SYSCTL_ADD_U32(&rack_sysctl_ctx,
875 SYSCTL_CHILDREN(rack_probertt),
876 OID_AUTO, "safety", CTLFLAG_RW,
877 &rack_probe_rtt_safety_val, 2000000,
878 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)");
879 SYSCTL_ADD_U32(&rack_sysctl_ctx,
880 SYSCTL_CHILDREN(rack_probertt),
881 OID_AUTO, "sets_cwnd", CTLFLAG_RW,
882 &rack_probe_rtt_sets_cwnd, 0,
883 "Do we set the cwnd too (if always_lower is on)");
884 SYSCTL_ADD_U32(&rack_sysctl_ctx,
885 SYSCTL_CHILDREN(rack_probertt),
886 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW,
887 &rack_max_drain_wait, 2,
888 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal");
889 SYSCTL_ADD_U32(&rack_sysctl_ctx,
890 SYSCTL_CHILDREN(rack_probertt),
891 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW,
893 "We must drain this many gp_srtt's waiting for flight to reach goal");
894 SYSCTL_ADD_U32(&rack_sysctl_ctx,
895 SYSCTL_CHILDREN(rack_probertt),
896 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW,
897 &rack_probertt_use_min_rtt_entry, 1,
898 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry");
899 SYSCTL_ADD_U32(&rack_sysctl_ctx,
900 SYSCTL_CHILDREN(rack_probertt),
901 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW,
902 &rack_probertt_use_min_rtt_exit, 0,
903 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt");
904 SYSCTL_ADD_U32(&rack_sysctl_ctx,
905 SYSCTL_CHILDREN(rack_probertt),
906 OID_AUTO, "length_div", CTLFLAG_RW,
907 &rack_probertt_gpsrtt_cnt_div, 0,
908 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)");
909 SYSCTL_ADD_U32(&rack_sysctl_ctx,
910 SYSCTL_CHILDREN(rack_probertt),
911 OID_AUTO, "length_mul", CTLFLAG_RW,
912 &rack_probertt_gpsrtt_cnt_mul, 0,
913 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)");
914 SYSCTL_ADD_U32(&rack_sysctl_ctx,
915 SYSCTL_CHILDREN(rack_probertt),
916 OID_AUTO, "holdtim_at_target", CTLFLAG_RW,
917 &rack_min_probertt_hold, 200000,
918 "What is the minimum time we hold probertt at target");
919 SYSCTL_ADD_U32(&rack_sysctl_ctx,
920 SYSCTL_CHILDREN(rack_probertt),
921 OID_AUTO, "filter_life", CTLFLAG_RW,
922 &rack_probertt_filter_life, 10000000,
923 "What is the time for the filters life in useconds");
924 SYSCTL_ADD_U32(&rack_sysctl_ctx,
925 SYSCTL_CHILDREN(rack_probertt),
926 OID_AUTO, "lower_within", CTLFLAG_RW,
927 &rack_probertt_lower_within, 10,
928 "If the rtt goes lower within this percentage of the time, go into probe-rtt");
929 SYSCTL_ADD_U32(&rack_sysctl_ctx,
930 SYSCTL_CHILDREN(rack_probertt),
931 OID_AUTO, "must_move", CTLFLAG_RW,
932 &rack_min_rtt_movement, 250,
933 "How much is the minimum movement in rtt to count as a drop for probertt purposes");
934 SYSCTL_ADD_U32(&rack_sysctl_ctx,
935 SYSCTL_CHILDREN(rack_probertt),
936 OID_AUTO, "clear_is_cnts", CTLFLAG_RW,
937 &rack_probertt_clear_is, 1,
938 "Do we clear I/S counts on exiting probe-rtt");
939 SYSCTL_ADD_S32(&rack_sysctl_ctx,
940 SYSCTL_CHILDREN(rack_probertt),
941 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW,
942 &rack_max_drain_hbp, 1,
943 "How many extra drain gpsrtt's do we get in highly buffered paths");
944 SYSCTL_ADD_S32(&rack_sysctl_ctx,
945 SYSCTL_CHILDREN(rack_probertt),
946 OID_AUTO, "hbp_threshold", CTLFLAG_RW,
948 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold");
950 rack_tracepoint = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
951 SYSCTL_CHILDREN(rack_sysctl_root),
954 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
955 "Rack tracepoint facility");
956 SYSCTL_ADD_U32(&rack_sysctl_ctx,
957 SYSCTL_CHILDREN(rack_tracepoint),
958 OID_AUTO, "number", CTLFLAG_RW,
959 &rack_trace_point_config, 0,
960 "What is the trace point number to activate (0=none, 0xffffffff = all)?");
961 SYSCTL_ADD_U32(&rack_sysctl_ctx,
962 SYSCTL_CHILDREN(rack_tracepoint),
963 OID_AUTO, "bbmode", CTLFLAG_RW,
964 &rack_trace_point_bb_mode, 4,
965 "What is BB logging mode that is activated?");
966 SYSCTL_ADD_S32(&rack_sysctl_ctx,
967 SYSCTL_CHILDREN(rack_tracepoint),
968 OID_AUTO, "count", CTLFLAG_RW,
969 &rack_trace_point_count, 0,
970 "How many connections will have BB logging turned on that hit the tracepoint?");
971 /* Pacing related sysctls */
972 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
973 SYSCTL_CHILDREN(rack_sysctl_root),
976 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
977 "Pacing related Controls");
978 SYSCTL_ADD_S32(&rack_sysctl_ctx,
979 SYSCTL_CHILDREN(rack_pacing),
980 OID_AUTO, "max_pace_over", CTLFLAG_RW,
981 &rack_max_per_above, 30,
982 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)");
983 SYSCTL_ADD_S32(&rack_sysctl_ctx,
984 SYSCTL_CHILDREN(rack_pacing),
985 OID_AUTO, "pace_to_one", CTLFLAG_RW,
986 &rack_pace_one_seg, 0,
987 "Do we allow low b/w pacing of 1MSS instead of two");
988 SYSCTL_ADD_S32(&rack_sysctl_ctx,
989 SYSCTL_CHILDREN(rack_pacing),
990 OID_AUTO, "limit_wsrtt", CTLFLAG_RW,
991 &rack_limit_time_with_srtt, 0,
992 "Do we limit pacing time based on srtt");
993 SYSCTL_ADD_S32(&rack_sysctl_ctx,
994 SYSCTL_CHILDREN(rack_pacing),
995 OID_AUTO, "init_win", CTLFLAG_RW,
996 &rack_default_init_window, 0,
997 "Do we have a rack initial window 0 = system default");
998 SYSCTL_ADD_U16(&rack_sysctl_ctx,
999 SYSCTL_CHILDREN(rack_pacing),
1000 OID_AUTO, "gp_per_ss", CTLFLAG_RW,
1001 &rack_per_of_gp_ss, 250,
1002 "If non zero, what percentage of goodput to pace at in slow start");
1003 SYSCTL_ADD_U16(&rack_sysctl_ctx,
1004 SYSCTL_CHILDREN(rack_pacing),
1005 OID_AUTO, "gp_per_ca", CTLFLAG_RW,
1006 &rack_per_of_gp_ca, 150,
1007 "If non zero, what percentage of goodput to pace at in congestion avoidance");
1008 SYSCTL_ADD_U16(&rack_sysctl_ctx,
1009 SYSCTL_CHILDREN(rack_pacing),
1010 OID_AUTO, "gp_per_rec", CTLFLAG_RW,
1011 &rack_per_of_gp_rec, 200,
1012 "If non zero, what percentage of goodput to pace at in recovery");
1013 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1014 SYSCTL_CHILDREN(rack_pacing),
1015 OID_AUTO, "pace_max_seg", CTLFLAG_RW,
1016 &rack_hptsi_segments, 40,
1017 "What size is the max for TSO segments in pacing and burst mitigation");
1018 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1019 SYSCTL_CHILDREN(rack_pacing),
1020 OID_AUTO, "burst_reduces", CTLFLAG_RW,
1021 &rack_slot_reduction, 4,
1022 "When doing only burst mitigation what is the reduce divisor");
1023 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1024 SYSCTL_CHILDREN(rack_sysctl_root),
1025 OID_AUTO, "use_pacing", CTLFLAG_RW,
1026 &rack_pace_every_seg, 0,
1027 "If set we use pacing, if clear we use only the original burst mitigation");
1028 SYSCTL_ADD_U64(&rack_sysctl_ctx,
1029 SYSCTL_CHILDREN(rack_pacing),
1030 OID_AUTO, "rate_cap", CTLFLAG_RW,
1031 &rack_bw_rate_cap, 0,
1032 "If set we apply this value to the absolute rate cap used by pacing");
1033 SYSCTL_ADD_U8(&rack_sysctl_ctx,
1034 SYSCTL_CHILDREN(rack_sysctl_root),
1035 OID_AUTO, "req_measure_cnt", CTLFLAG_RW,
1036 &rack_req_measurements, 1,
1037 "If doing dynamic pacing, how many measurements must be in before we start pacing?");
1038 /* Hardware pacing */
1039 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1040 SYSCTL_CHILDREN(rack_sysctl_root),
1043 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1044 "Pacing related Controls");
1045 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1046 SYSCTL_CHILDREN(rack_hw_pacing),
1047 OID_AUTO, "rwnd_factor", CTLFLAG_RW,
1048 &rack_hw_rwnd_factor, 2,
1049 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?");
1050 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1051 SYSCTL_CHILDREN(rack_hw_pacing),
1052 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW,
1053 &rack_enobuf_hw_boost_mult, 2,
1054 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?");
1055 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1056 SYSCTL_CHILDREN(rack_hw_pacing),
1057 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW,
1058 &rack_enobuf_hw_max, 2,
1059 "What is the max boost the pacing time if we see a ENOBUFS?");
1060 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1061 SYSCTL_CHILDREN(rack_hw_pacing),
1062 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW,
1063 &rack_enobuf_hw_min, 2,
1064 "What is the min boost the pacing time if we see a ENOBUFS?");
1065 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1066 SYSCTL_CHILDREN(rack_hw_pacing),
1067 OID_AUTO, "enable", CTLFLAG_RW,
1068 &rack_enable_hw_pacing, 0,
1069 "Should RACK attempt to use hw pacing?");
1070 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1071 SYSCTL_CHILDREN(rack_hw_pacing),
1072 OID_AUTO, "rate_cap", CTLFLAG_RW,
1073 &rack_hw_rate_caps, 1,
1074 "Does the highest hardware pacing rate cap the rate we will send at??");
1075 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1076 SYSCTL_CHILDREN(rack_hw_pacing),
1077 OID_AUTO, "rate_min", CTLFLAG_RW,
1078 &rack_hw_rate_min, 0,
1079 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?");
1080 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1081 SYSCTL_CHILDREN(rack_hw_pacing),
1082 OID_AUTO, "rate_to_low", CTLFLAG_RW,
1083 &rack_hw_rate_to_low, 0,
1084 "If we fall below this rate, dis-engage hw pacing?");
1085 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1086 SYSCTL_CHILDREN(rack_hw_pacing),
1087 OID_AUTO, "up_only", CTLFLAG_RW,
1088 &rack_hw_up_only, 1,
1089 "Do we allow hw pacing to lower the rate selected?");
1090 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1091 SYSCTL_CHILDREN(rack_hw_pacing),
1092 OID_AUTO, "extra_mss_precise", CTLFLAG_RW,
1093 &rack_hw_pace_extra_slots, 2,
1094 "If the rates between software and hardware match precisely how many extra time_betweens do we get?");
1095 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1096 SYSCTL_CHILDREN(rack_sysctl_root),
1099 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1100 "Rack Timely RTT Controls");
1101 /* Timely based GP dynamics */
1102 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1103 SYSCTL_CHILDREN(rack_timely),
1104 OID_AUTO, "upper", CTLFLAG_RW,
1105 &rack_gp_per_bw_mul_up, 2,
1106 "Rack timely upper range for equal b/w (in percentage)");
1107 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1108 SYSCTL_CHILDREN(rack_timely),
1109 OID_AUTO, "lower", CTLFLAG_RW,
1110 &rack_gp_per_bw_mul_down, 4,
1111 "Rack timely lower range for equal b/w (in percentage)");
1112 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1113 SYSCTL_CHILDREN(rack_timely),
1114 OID_AUTO, "rtt_max_mul", CTLFLAG_RW,
1115 &rack_gp_rtt_maxmul, 3,
1116 "Rack timely multiplier of lowest rtt for rtt_max");
1117 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1118 SYSCTL_CHILDREN(rack_timely),
1119 OID_AUTO, "rtt_min_div", CTLFLAG_RW,
1120 &rack_gp_rtt_mindiv, 4,
1121 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt");
1122 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1123 SYSCTL_CHILDREN(rack_timely),
1124 OID_AUTO, "rtt_min_mul", CTLFLAG_RW,
1125 &rack_gp_rtt_minmul, 1,
1126 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
1127 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1128 SYSCTL_CHILDREN(rack_timely),
1129 OID_AUTO, "decrease", CTLFLAG_RW,
1130 &rack_gp_decrease_per, 20,
1131 "Rack timely decrease percentage of our GP multiplication factor");
1132 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1133 SYSCTL_CHILDREN(rack_timely),
1134 OID_AUTO, "increase", CTLFLAG_RW,
1135 &rack_gp_increase_per, 2,
1136 "Rack timely increase perentage of our GP multiplication factor");
1137 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1138 SYSCTL_CHILDREN(rack_timely),
1139 OID_AUTO, "lowerbound", CTLFLAG_RW,
1140 &rack_per_lower_bound, 50,
1141 "Rack timely lowest percentage we allow GP multiplier to fall to");
1142 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1143 SYSCTL_CHILDREN(rack_timely),
1144 OID_AUTO, "upperboundss", CTLFLAG_RW,
1145 &rack_per_upper_bound_ss, 0,
1146 "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)");
1147 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1148 SYSCTL_CHILDREN(rack_timely),
1149 OID_AUTO, "upperboundca", CTLFLAG_RW,
1150 &rack_per_upper_bound_ca, 0,
1151 "Rack timely highest percentage we allow GP multiplier to CA raise to (0 is no upperbound)");
1152 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1153 SYSCTL_CHILDREN(rack_timely),
1154 OID_AUTO, "dynamicgp", CTLFLAG_RW,
1155 &rack_do_dyn_mul, 0,
1156 "Rack timely do we enable dynmaic timely goodput by default");
1157 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1158 SYSCTL_CHILDREN(rack_timely),
1159 OID_AUTO, "no_rec_red", CTLFLAG_RW,
1160 &rack_gp_no_rec_chg, 1,
1161 "Rack timely do we prohibit the recovery multiplier from being lowered");
1162 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1163 SYSCTL_CHILDREN(rack_timely),
1164 OID_AUTO, "red_clear_cnt", CTLFLAG_RW,
1165 &rack_timely_dec_clear, 6,
1166 "Rack timely what threshold do we count to before another boost during b/w decent");
1167 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1168 SYSCTL_CHILDREN(rack_timely),
1169 OID_AUTO, "max_push_rise", CTLFLAG_RW,
1170 &rack_timely_max_push_rise, 3,
1171 "Rack timely how many times do we push up with b/w increase");
1172 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1173 SYSCTL_CHILDREN(rack_timely),
1174 OID_AUTO, "max_push_drop", CTLFLAG_RW,
1175 &rack_timely_max_push_drop, 3,
1176 "Rack timely how many times do we push back on b/w decent");
1177 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1178 SYSCTL_CHILDREN(rack_timely),
1179 OID_AUTO, "min_segs", CTLFLAG_RW,
1180 &rack_timely_min_segs, 4,
1181 "Rack timely when setting the cwnd what is the min num segments");
1182 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1183 SYSCTL_CHILDREN(rack_timely),
1184 OID_AUTO, "noback_max", CTLFLAG_RW,
1185 &rack_use_max_for_nobackoff, 0,
1186 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min");
1187 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1188 SYSCTL_CHILDREN(rack_timely),
1189 OID_AUTO, "interim_timely_only", CTLFLAG_RW,
1190 &rack_timely_int_timely_only, 0,
1191 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)");
1192 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1193 SYSCTL_CHILDREN(rack_timely),
1194 OID_AUTO, "nonstop", CTLFLAG_RW,
1195 &rack_timely_no_stopping, 0,
1196 "Rack timely don't stop increase");
1197 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1198 SYSCTL_CHILDREN(rack_timely),
1199 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW,
1200 &rack_down_raise_thresh, 100,
1201 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)");
1202 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1203 SYSCTL_CHILDREN(rack_timely),
1204 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW,
1206 "Bottom dragging if not these many segments outstanding and room");
1208 /* TLP and Rack related parameters */
1209 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1210 SYSCTL_CHILDREN(rack_sysctl_root),
1213 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1214 "TLP and Rack related Controls");
1215 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1216 SYSCTL_CHILDREN(rack_tlp),
1217 OID_AUTO, "use_rrr", CTLFLAG_RW,
1219 "Do we use Rack Rapid Recovery");
1220 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1221 SYSCTL_CHILDREN(rack_tlp),
1222 OID_AUTO, "post_rec_labc", CTLFLAG_RW,
1223 &rack_max_abc_post_recovery, 2,
1224 "Since we do early recovery, do we override the l_abc to a value, if so what?");
1225 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1226 SYSCTL_CHILDREN(rack_tlp),
1227 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW,
1228 &rack_non_rxt_use_cr, 0,
1229 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk");
1230 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1231 SYSCTL_CHILDREN(rack_tlp),
1232 OID_AUTO, "tlpmethod", CTLFLAG_RW,
1233 &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
1234 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
1235 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1236 SYSCTL_CHILDREN(rack_tlp),
1237 OID_AUTO, "limit", CTLFLAG_RW,
1239 "How many TLP's can be sent without sending new data");
1240 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1241 SYSCTL_CHILDREN(rack_tlp),
1242 OID_AUTO, "use_greater", CTLFLAG_RW,
1243 &rack_tlp_use_greater, 1,
1244 "Should we use the rack_rtt time if its greater than srtt");
1245 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1246 SYSCTL_CHILDREN(rack_tlp),
1247 OID_AUTO, "tlpminto", CTLFLAG_RW,
1248 &rack_tlp_min, 10000,
1249 "TLP minimum timeout per the specification (in microseconds)");
1250 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1251 SYSCTL_CHILDREN(rack_tlp),
1252 OID_AUTO, "send_oldest", CTLFLAG_RW,
1253 &rack_always_send_oldest, 0,
1254 "Should we always send the oldest TLP and RACK-TLP");
1255 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1256 SYSCTL_CHILDREN(rack_tlp),
1257 OID_AUTO, "rack_tlimit", CTLFLAG_RW,
1258 &rack_limited_retran, 0,
1259 "How many times can a rack timeout drive out sends");
1260 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1261 SYSCTL_CHILDREN(rack_tlp),
1262 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
1263 &rack_lower_cwnd_at_tlp, 0,
1264 "When a TLP completes a retran should we enter recovery");
1265 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1266 SYSCTL_CHILDREN(rack_tlp),
1267 OID_AUTO, "reorder_thresh", CTLFLAG_RW,
1268 &rack_reorder_thresh, 2,
1269 "What factor for rack will be added when seeing reordering (shift right)");
1270 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1271 SYSCTL_CHILDREN(rack_tlp),
1272 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
1273 &rack_tlp_thresh, 1,
1274 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
1275 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1276 SYSCTL_CHILDREN(rack_tlp),
1277 OID_AUTO, "reorder_fade", CTLFLAG_RW,
1278 &rack_reorder_fade, 60000000,
1279 "Does reorder detection fade, if so how many microseconds (0 means never)");
1280 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1281 SYSCTL_CHILDREN(rack_tlp),
1282 OID_AUTO, "pktdelay", CTLFLAG_RW,
1283 &rack_pkt_delay, 1000,
1284 "Extra RACK time (in microseconds) besides reordering thresh");
1286 /* Timer related controls */
1287 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1288 SYSCTL_CHILDREN(rack_sysctl_root),
1291 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1292 "Timer related controls");
1293 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1294 SYSCTL_CHILDREN(rack_timers),
1295 OID_AUTO, "persmin", CTLFLAG_RW,
1296 &rack_persist_min, 250000,
1297 "What is the minimum time in microseconds between persists");
1298 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1299 SYSCTL_CHILDREN(rack_timers),
1300 OID_AUTO, "persmax", CTLFLAG_RW,
1301 &rack_persist_max, 2000000,
1302 "What is the largest delay in microseconds between persists");
1303 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1304 SYSCTL_CHILDREN(rack_timers),
1305 OID_AUTO, "delayed_ack", CTLFLAG_RW,
1306 &rack_delayed_ack_time, 40000,
1307 "Delayed ack time (40ms in microseconds)");
1308 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1309 SYSCTL_CHILDREN(rack_timers),
1310 OID_AUTO, "minrto", CTLFLAG_RW,
1311 &rack_rto_min, 30000,
1312 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP");
1313 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1314 SYSCTL_CHILDREN(rack_timers),
1315 OID_AUTO, "maxrto", CTLFLAG_RW,
1316 &rack_rto_max, 4000000,
1317 "Maximum RTO in microseconds -- should be at least as large as min_rto");
1318 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1319 SYSCTL_CHILDREN(rack_timers),
1320 OID_AUTO, "minto", CTLFLAG_RW,
1322 "Minimum rack timeout in microseconds");
1323 /* Measure controls */
1324 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1325 SYSCTL_CHILDREN(rack_sysctl_root),
1328 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1329 "Measure related controls");
1330 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1331 SYSCTL_CHILDREN(rack_measure),
1332 OID_AUTO, "wma_divisor", CTLFLAG_RW,
1333 &rack_wma_divisor, 8,
1334 "When doing b/w calculation what is the divisor for the WMA");
1335 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1336 SYSCTL_CHILDREN(rack_measure),
1337 OID_AUTO, "end_cwnd", CTLFLAG_RW,
1338 &rack_cwnd_block_ends_measure, 0,
1339 "Does a cwnd just-return end the measurement window (app limited)");
1340 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1341 SYSCTL_CHILDREN(rack_measure),
1342 OID_AUTO, "end_rwnd", CTLFLAG_RW,
1343 &rack_rwnd_block_ends_measure, 0,
1344 "Does an rwnd just-return end the measurement window (app limited -- not persists)");
1345 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1346 SYSCTL_CHILDREN(rack_measure),
1347 OID_AUTO, "min_target", CTLFLAG_RW,
1348 &rack_def_data_window, 20,
1349 "What is the minimum target window (in mss) for a GP measurements");
1350 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1351 SYSCTL_CHILDREN(rack_measure),
1352 OID_AUTO, "goal_bdp", CTLFLAG_RW,
1354 "What is the goal BDP to measure");
1355 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1356 SYSCTL_CHILDREN(rack_measure),
1357 OID_AUTO, "min_srtts", CTLFLAG_RW,
1359 "What is the goal BDP to measure");
1360 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1361 SYSCTL_CHILDREN(rack_measure),
1362 OID_AUTO, "min_measure_tim", CTLFLAG_RW,
1363 &rack_min_measure_usec, 0,
1364 "What is the Minimum time time for a measurement if 0, this is off");
1366 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1367 SYSCTL_CHILDREN(rack_sysctl_root),
1370 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1371 "Feature controls");
1372 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1373 SYSCTL_CHILDREN(rack_features),
1374 OID_AUTO, "cmpack", CTLFLAG_RW,
1375 &rack_use_cmp_acks, 1,
1376 "Should RACK have LRO send compressed acks");
1377 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1378 SYSCTL_CHILDREN(rack_features),
1379 OID_AUTO, "fsb", CTLFLAG_RW,
1381 "Should RACK use the fast send block?");
1382 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1383 SYSCTL_CHILDREN(rack_features),
1384 OID_AUTO, "rfo", CTLFLAG_RW,
1386 "Should RACK use rack_fast_output()?");
1387 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1388 SYSCTL_CHILDREN(rack_features),
1389 OID_AUTO, "rsmrfo", CTLFLAG_RW,
1390 &rack_use_rsm_rfo, 1,
1391 "Should RACK use rack_fast_rsm_output()?");
1392 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1393 SYSCTL_CHILDREN(rack_features),
1394 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW,
1395 &rack_enable_mqueue_for_nonpaced, 0,
1396 "Should RACK use mbuf queuing for non-paced connections");
1397 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1398 SYSCTL_CHILDREN(rack_features),
1399 OID_AUTO, "hystartplusplus", CTLFLAG_RW,
1400 &rack_do_hystart, 0,
1401 "Should RACK enable HyStart++ on connections?");
1402 /* Misc rack controls */
1403 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1404 SYSCTL_CHILDREN(rack_sysctl_root),
1407 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1408 "Misc related controls");
1409 #ifdef TCP_ACCOUNTING
1410 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1411 SYSCTL_CHILDREN(rack_misc),
1412 OID_AUTO, "tcp_acct", CTLFLAG_RW,
1413 &rack_tcp_accounting, 0,
1414 "Should we turn on TCP accounting for all rack sessions?");
1416 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1417 SYSCTL_CHILDREN(rack_misc),
1418 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW,
1419 &rack_apply_rtt_with_reduced_conf, 0,
1420 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?");
1421 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1422 SYSCTL_CHILDREN(rack_misc),
1423 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW,
1424 &rack_dsack_std_based, 3,
1425 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?");
1426 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1427 SYSCTL_CHILDREN(rack_misc),
1428 OID_AUTO, "prr_addback_max", CTLFLAG_RW,
1429 &rack_prr_addbackmax, 2,
1430 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?");
1431 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1432 SYSCTL_CHILDREN(rack_misc),
1433 OID_AUTO, "stats_gets_ms", CTLFLAG_RW,
1434 &rack_stats_gets_ms_rtt, 1,
1435 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?");
1436 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1437 SYSCTL_CHILDREN(rack_misc),
1438 OID_AUTO, "clientlowbuf", CTLFLAG_RW,
1439 &rack_client_low_buf, 0,
1440 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?");
1441 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1442 SYSCTL_CHILDREN(rack_misc),
1443 OID_AUTO, "defprofile", CTLFLAG_RW,
1444 &rack_def_profile, 0,
1445 "Should RACK use a default profile (0=no, num == profile num)?");
1446 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1447 SYSCTL_CHILDREN(rack_misc),
1448 OID_AUTO, "shared_cwnd", CTLFLAG_RW,
1449 &rack_enable_shared_cwnd, 1,
1450 "Should RACK try to use the shared cwnd on connections where allowed");
1451 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1452 SYSCTL_CHILDREN(rack_misc),
1453 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW,
1454 &rack_limits_scwnd, 1,
1455 "Should RACK place low end time limits on the shared cwnd feature");
1456 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1457 SYSCTL_CHILDREN(rack_misc),
1458 OID_AUTO, "iMac_dack", CTLFLAG_RW,
1459 &rack_use_imac_dack, 0,
1460 "Should RACK try to emulate iMac delayed ack");
1461 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1462 SYSCTL_CHILDREN(rack_misc),
1463 OID_AUTO, "no_prr", CTLFLAG_RW,
1464 &rack_disable_prr, 0,
1465 "Should RACK not use prr and only pace (must have pacing on)");
1466 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1467 SYSCTL_CHILDREN(rack_misc),
1468 OID_AUTO, "bb_verbose", CTLFLAG_RW,
1469 &rack_verbose_logging, 0,
1470 "Should RACK black box logging be verbose");
1471 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1472 SYSCTL_CHILDREN(rack_misc),
1473 OID_AUTO, "data_after_close", CTLFLAG_RW,
1474 &rack_ignore_data_after_close, 1,
1475 "Do we hold off sending a RST until all pending data is ack'd");
1476 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1477 SYSCTL_CHILDREN(rack_misc),
1478 OID_AUTO, "no_sack_needed", CTLFLAG_RW,
1479 &rack_sack_not_required, 1,
1480 "Do we allow rack to run on connections not supporting SACK");
1481 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1482 SYSCTL_CHILDREN(rack_misc),
1483 OID_AUTO, "prr_sendalot", CTLFLAG_RW,
1484 &rack_send_a_lot_in_prr, 1,
1485 "Send a lot in prr");
1486 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1487 SYSCTL_CHILDREN(rack_misc),
1488 OID_AUTO, "autoscale", CTLFLAG_RW,
1489 &rack_autosndbuf_inc, 20,
1490 "What percentage should rack scale up its snd buffer by?");
1491 /* Sack Attacker detection stuff */
1492 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1493 SYSCTL_CHILDREN(rack_attack),
1494 OID_AUTO, "detect_highsackratio", CTLFLAG_RW,
1495 &rack_highest_sack_thresh_seen, 0,
1496 "Highest sack to ack ratio seen");
1497 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1498 SYSCTL_CHILDREN(rack_attack),
1499 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW,
1500 &rack_highest_move_thresh_seen, 0,
1501 "Highest move to non-move ratio seen");
1502 rack_ack_total = counter_u64_alloc(M_WAITOK);
1503 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1504 SYSCTL_CHILDREN(rack_attack),
1505 OID_AUTO, "acktotal", CTLFLAG_RD,
1507 "Total number of Ack's");
1508 rack_express_sack = counter_u64_alloc(M_WAITOK);
1509 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1510 SYSCTL_CHILDREN(rack_attack),
1511 OID_AUTO, "exp_sacktotal", CTLFLAG_RD,
1513 "Total expresss number of Sack's");
1514 rack_sack_total = counter_u64_alloc(M_WAITOK);
1515 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1516 SYSCTL_CHILDREN(rack_attack),
1517 OID_AUTO, "sacktotal", CTLFLAG_RD,
1519 "Total number of SACKs");
1520 rack_move_none = counter_u64_alloc(M_WAITOK);
1521 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1522 SYSCTL_CHILDREN(rack_attack),
1523 OID_AUTO, "move_none", CTLFLAG_RD,
1525 "Total number of SACK index reuse of positions under threshold");
1526 rack_move_some = counter_u64_alloc(M_WAITOK);
1527 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1528 SYSCTL_CHILDREN(rack_attack),
1529 OID_AUTO, "move_some", CTLFLAG_RD,
1531 "Total number of SACK index reuse of positions over threshold");
1532 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK);
1533 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1534 SYSCTL_CHILDREN(rack_attack),
1535 OID_AUTO, "attacks", CTLFLAG_RD,
1536 &rack_sack_attacks_detected,
1537 "Total number of SACK attackers that had sack disabled");
1538 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK);
1539 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1540 SYSCTL_CHILDREN(rack_attack),
1541 OID_AUTO, "reversed", CTLFLAG_RD,
1542 &rack_sack_attacks_reversed,
1543 "Total number of SACK attackers that were later determined false positive");
1544 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK);
1545 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1546 SYSCTL_CHILDREN(rack_attack),
1547 OID_AUTO, "nextmerge", CTLFLAG_RD,
1548 &rack_sack_used_next_merge,
1549 "Total number of times we used the next merge");
1550 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK);
1551 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1552 SYSCTL_CHILDREN(rack_attack),
1553 OID_AUTO, "prevmerge", CTLFLAG_RD,
1554 &rack_sack_used_prev_merge,
1555 "Total number of times we used the prev merge");
1557 rack_fto_send = counter_u64_alloc(M_WAITOK);
1558 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1559 SYSCTL_CHILDREN(rack_counters),
1560 OID_AUTO, "fto_send", CTLFLAG_RD,
1561 &rack_fto_send, "Total number of rack_fast_output sends");
1562 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK);
1563 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1564 SYSCTL_CHILDREN(rack_counters),
1565 OID_AUTO, "fto_rsm_send", CTLFLAG_RD,
1566 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends");
1567 rack_nfto_resend = counter_u64_alloc(M_WAITOK);
1568 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1569 SYSCTL_CHILDREN(rack_counters),
1570 OID_AUTO, "nfto_resend", CTLFLAG_RD,
1571 &rack_nfto_resend, "Total number of rack_output retransmissions");
1572 rack_non_fto_send = counter_u64_alloc(M_WAITOK);
1573 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1574 SYSCTL_CHILDREN(rack_counters),
1575 OID_AUTO, "nfto_send", CTLFLAG_RD,
1576 &rack_non_fto_send, "Total number of rack_output first sends");
1577 rack_extended_rfo = counter_u64_alloc(M_WAITOK);
1578 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1579 SYSCTL_CHILDREN(rack_counters),
1580 OID_AUTO, "rfo_extended", CTLFLAG_RD,
1581 &rack_extended_rfo, "Total number of times we extended rfo");
1583 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK);
1584 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1585 SYSCTL_CHILDREN(rack_counters),
1586 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD,
1587 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing");
1588 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK);
1590 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1591 SYSCTL_CHILDREN(rack_counters),
1592 OID_AUTO, "hwpace_lost", CTLFLAG_RD,
1593 &rack_hw_pace_lost, "Total number of times we lost hw pacing after it was initialized");
1594 rack_tlp_tot = counter_u64_alloc(M_WAITOK);
1595 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1596 SYSCTL_CHILDREN(rack_counters),
1597 OID_AUTO, "tlp_to_total", CTLFLAG_RD,
1599 "Total number of tail loss probe expirations");
1600 rack_tlp_newdata = counter_u64_alloc(M_WAITOK);
1601 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1602 SYSCTL_CHILDREN(rack_counters),
1603 OID_AUTO, "tlp_new", CTLFLAG_RD,
1605 "Total number of tail loss probe sending new data");
1606 rack_tlp_retran = counter_u64_alloc(M_WAITOK);
1607 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1608 SYSCTL_CHILDREN(rack_counters),
1609 OID_AUTO, "tlp_retran", CTLFLAG_RD,
1611 "Total number of tail loss probe sending retransmitted data");
1612 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK);
1613 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1614 SYSCTL_CHILDREN(rack_counters),
1615 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD,
1616 &rack_tlp_retran_bytes,
1617 "Total bytes of tail loss probe sending retransmitted data");
1618 rack_to_tot = counter_u64_alloc(M_WAITOK);
1619 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1620 SYSCTL_CHILDREN(rack_counters),
1621 OID_AUTO, "rack_to_tot", CTLFLAG_RD,
1623 "Total number of times the rack to expired");
1624 rack_saw_enobuf = counter_u64_alloc(M_WAITOK);
1625 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1626 SYSCTL_CHILDREN(rack_counters),
1627 OID_AUTO, "saw_enobufs", CTLFLAG_RD,
1629 "Total number of times a sends returned enobuf for non-hdwr paced connections");
1630 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK);
1631 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1632 SYSCTL_CHILDREN(rack_counters),
1633 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD,
1634 &rack_saw_enobuf_hw,
1635 "Total number of times a send returned enobuf for hdwr paced connections");
1636 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK);
1637 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1638 SYSCTL_CHILDREN(rack_counters),
1639 OID_AUTO, "saw_enetunreach", CTLFLAG_RD,
1640 &rack_saw_enetunreach,
1641 "Total number of times a send received a enetunreachable");
1642 rack_hot_alloc = counter_u64_alloc(M_WAITOK);
1643 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1644 SYSCTL_CHILDREN(rack_counters),
1645 OID_AUTO, "alloc_hot", CTLFLAG_RD,
1647 "Total allocations from the top of our list");
1648 rack_to_alloc = counter_u64_alloc(M_WAITOK);
1649 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1650 SYSCTL_CHILDREN(rack_counters),
1651 OID_AUTO, "allocs", CTLFLAG_RD,
1653 "Total allocations of tracking structures");
1654 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK);
1655 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1656 SYSCTL_CHILDREN(rack_counters),
1657 OID_AUTO, "allochard", CTLFLAG_RD,
1658 &rack_to_alloc_hard,
1659 "Total allocations done with sleeping the hard way");
1660 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK);
1661 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1662 SYSCTL_CHILDREN(rack_counters),
1663 OID_AUTO, "allocemerg", CTLFLAG_RD,
1664 &rack_to_alloc_emerg,
1665 "Total allocations done from emergency cache");
1666 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK);
1667 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1668 SYSCTL_CHILDREN(rack_counters),
1669 OID_AUTO, "alloc_limited", CTLFLAG_RD,
1670 &rack_to_alloc_limited,
1671 "Total allocations dropped due to limit");
1672 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK);
1673 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1674 SYSCTL_CHILDREN(rack_counters),
1675 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD,
1676 &rack_alloc_limited_conns,
1677 "Connections with allocations dropped due to limit");
1678 rack_split_limited = counter_u64_alloc(M_WAITOK);
1679 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1680 SYSCTL_CHILDREN(rack_counters),
1681 OID_AUTO, "split_limited", CTLFLAG_RD,
1682 &rack_split_limited,
1683 "Split allocations dropped due to limit");
1684 rack_persists_sends = counter_u64_alloc(M_WAITOK);
1685 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1686 SYSCTL_CHILDREN(rack_counters),
1687 OID_AUTO, "persist_sends", CTLFLAG_RD,
1688 &rack_persists_sends,
1689 "Number of times we sent a persist probe");
1690 rack_persists_acks = counter_u64_alloc(M_WAITOK);
1691 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1692 SYSCTL_CHILDREN(rack_counters),
1693 OID_AUTO, "persist_acks", CTLFLAG_RD,
1694 &rack_persists_acks,
1695 "Number of times a persist probe was acked");
1696 rack_persists_loss = counter_u64_alloc(M_WAITOK);
1697 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1698 SYSCTL_CHILDREN(rack_counters),
1699 OID_AUTO, "persist_loss", CTLFLAG_RD,
1700 &rack_persists_loss,
1701 "Number of times we detected a lost persist probe (no ack)");
1702 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK);
1703 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1704 SYSCTL_CHILDREN(rack_counters),
1705 OID_AUTO, "persist_loss_ends", CTLFLAG_RD,
1706 &rack_persists_lost_ends,
1707 "Number of lost persist probe (no ack) that the run ended with a PERSIST abort");
1709 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK);
1710 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1711 SYSCTL_CHILDREN(rack_counters),
1712 OID_AUTO, "map_adjust_req", CTLFLAG_RD,
1713 &rack_adjust_map_bw,
1714 "Number of times we hit the case where the sb went up and down on a sendmap entry");
1716 rack_multi_single_eq = counter_u64_alloc(M_WAITOK);
1717 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1718 SYSCTL_CHILDREN(rack_counters),
1719 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD,
1720 &rack_multi_single_eq,
1721 "Number of compressed acks total represented");
1722 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK);
1723 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1724 SYSCTL_CHILDREN(rack_counters),
1725 OID_AUTO, "cmp_ack_not", CTLFLAG_RD,
1726 &rack_proc_non_comp_ack,
1727 "Number of non compresseds acks that we processed");
1730 rack_sack_proc_all = counter_u64_alloc(M_WAITOK);
1731 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1732 SYSCTL_CHILDREN(rack_counters),
1733 OID_AUTO, "sack_long", CTLFLAG_RD,
1734 &rack_sack_proc_all,
1735 "Total times we had to walk whole list for sack processing");
1736 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK);
1737 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1738 SYSCTL_CHILDREN(rack_counters),
1739 OID_AUTO, "sack_restart", CTLFLAG_RD,
1740 &rack_sack_proc_restart,
1741 "Total times we had to walk whole list due to a restart");
1742 rack_sack_proc_short = counter_u64_alloc(M_WAITOK);
1743 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1744 SYSCTL_CHILDREN(rack_counters),
1745 OID_AUTO, "sack_short", CTLFLAG_RD,
1746 &rack_sack_proc_short,
1747 "Total times we took shortcut for sack processing");
1748 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK);
1749 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1750 SYSCTL_CHILDREN(rack_attack),
1751 OID_AUTO, "skipacked", CTLFLAG_RD,
1752 &rack_sack_skipped_acked,
1753 "Total number of times we skipped previously sacked");
1754 rack_sack_splits = counter_u64_alloc(M_WAITOK);
1755 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1756 SYSCTL_CHILDREN(rack_attack),
1757 OID_AUTO, "ofsplit", CTLFLAG_RD,
1759 "Total number of times we did the old fashion tree split");
1760 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK);
1761 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1762 SYSCTL_CHILDREN(rack_counters),
1763 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD,
1764 &rack_input_idle_reduces,
1765 "Total number of idle reductions on input");
1766 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK);
1767 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1768 SYSCTL_CHILDREN(rack_counters),
1769 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD,
1770 &rack_collapsed_win_seen,
1771 "Total number of collapsed window events seen (where our window shrinks)");
1773 rack_collapsed_win = counter_u64_alloc(M_WAITOK);
1774 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1775 SYSCTL_CHILDREN(rack_counters),
1776 OID_AUTO, "collapsed_win", CTLFLAG_RD,
1777 &rack_collapsed_win,
1778 "Total number of collapsed window events where we mark packets");
1779 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK);
1780 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1781 SYSCTL_CHILDREN(rack_counters),
1782 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD,
1783 &rack_collapsed_win_rxt,
1784 "Total number of packets that were retransmitted");
1785 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK);
1786 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1787 SYSCTL_CHILDREN(rack_counters),
1788 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD,
1789 &rack_collapsed_win_rxt_bytes,
1790 "Total number of bytes that were retransmitted");
1791 rack_try_scwnd = counter_u64_alloc(M_WAITOK);
1792 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1793 SYSCTL_CHILDREN(rack_counters),
1794 OID_AUTO, "tried_scwnd", CTLFLAG_RD,
1796 "Total number of scwnd attempts");
1797 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK);
1798 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1799 OID_AUTO, "outsize", CTLFLAG_RD,
1800 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes");
1801 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK);
1802 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1803 OID_AUTO, "opts", CTLFLAG_RD,
1804 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats");
1805 SYSCTL_ADD_PROC(&rack_sysctl_ctx,
1806 SYSCTL_CHILDREN(rack_sysctl_root),
1807 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1808 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters");
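/*
 * Sendmap ordering: rb_map_cmp() below keys the red-black tree of
 * rack_sendmap blocks by sequence range, so a lookup on a sequence
 * number lands on the block whose [r_start, r_end) range covers it.
 */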
1812 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a)
1814 if (SEQ_GEQ(b->r_start, a->r_start) &&
1815 SEQ_LT(b->r_start, a->r_end)) {
1817 * The entry b is within the
1819 * a -- |-------------|
1824 * b -- |-----------|
1827 } else if (SEQ_GEQ(b->r_start, a->r_end)) {
1829 * b falls as either the next
1830 * sequence block after a so a
1831 * is said to be smaller than b.
1841 * What's left is where a is
1842 * larger than b, i.e.:
1846 * b -- |--------------|
1851 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);
1852 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);
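/*
 * rc_init_window() returns the initial window in bytes: the system
 * default from tcp_compute_initwnd() unless the user configured
 * rc_init_win, in which case it is that many fixed-size segments.
 */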
1855 rc_init_window(struct tcp_rack *rack)
1859 if (rack->rc_init_win == 0) {
1861 * Nothing set by the user, use the system stack
1864 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)));
1866 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win;
1871 rack_get_fixed_pacing_bw(struct tcp_rack *rack)
1873 if (IN_FASTRECOVERY(rack->rc_tp->t_flags))
1874 return (rack->r_ctl.rc_fixed_pacing_rate_rec);
1875 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
1876 return (rack->r_ctl.rc_fixed_pacing_rate_ss);
1878 return (rack->r_ctl.rc_fixed_pacing_rate_ca);
1882 rack_get_bw(struct tcp_rack *rack)
1884 if (rack->use_fixed_rate) {
1885 /* Return the fixed pacing rate */
1886 return (rack_get_fixed_pacing_bw(rack));
1888 if (rack->r_ctl.gp_bw == 0) {
1890 * We have no b/w measurement yet,
1891 * if we have a user set initial bw
1892 * return it. If we don't have that and
1893 * we have an srtt, use the tcp IW (10) to
1894 * calculate a fictional b/w over the SRTT
1895 * which is more or less a guess. Note
1896 * we don't use our IW from rack on purpose
1897 * so if we have like IW=30, we are not
1898 * calculating a "huge" b/w.
1901 if (rack->r_ctl.init_rate)
1902 return (rack->r_ctl.init_rate);
1904 /* Has the user set a max peak rate? */
1905 #ifdef NETFLIX_PEAKRATE
1906 if (rack->rc_tp->t_maxpeakrate)
1907 return (rack->rc_tp->t_maxpeakrate);
1909 /* OK, let's come up with the IW guess, if we have an srtt */
1910 if (rack->rc_tp->t_srtt == 0) {
1912 * Go with old pacing method
1913 * i.e. burst mitigation only.
1917 /* OK, let's get the initial TCP win (not rack's) */
1918 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp));
1919 srtt = (uint64_t)rack->rc_tp->t_srtt;
1920 bw *= (uint64_t)USECS_IN_SECOND;
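/*
 * Illustrative numbers: a 10 segment IW of 1448 byte segments
 * (14480 bytes) over a 50000 usec SRTT works out to roughly
 * 290 KB/s once the product above is divided by the SRTT.
 */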
1922 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap))
1923 bw = rack->r_ctl.bw_rate_cap;
1928 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
1929 /* Averaging is done, we can return the value */
1930 bw = rack->r_ctl.gp_bw;
1932 /* Still doing initial average must calculate */
1933 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_measurements;
1935 #ifdef NETFLIX_PEAKRATE
1936 if ((rack->rc_tp->t_maxpeakrate) &&
1937 (bw > rack->rc_tp->t_maxpeakrate)) {
1938 /* The user has set a peak rate to pace at;
1939 * don't allow us to pace faster than that.
1941 return (rack->rc_tp->t_maxpeakrate);
1944 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap))
1945 bw = rack->r_ctl.bw_rate_cap;
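/*
 * rack_get_output_gain() picks the goodput percentage multiplier used
 * for pacing: the probe-rtt, recovery, slow start or congestion
 * avoidance value depending on the connection's current state.
 */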
1951 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm)
1953 if (rack->use_fixed_rate) {
1955 } else if (rack->in_probe_rtt && (rsm == NULL))
1956 return (rack->r_ctl.rack_per_of_gp_probertt);
1957 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) &&
1958 rack->r_ctl.rack_per_of_gp_rec)) {
1960 /* a retransmission always uses the recovery rate */
1961 return (rack->r_ctl.rack_per_of_gp_rec);
1962 } else if (rack->rack_rec_nonrxt_use_cr) {
1963 /* Directed to use the configured rate */
1964 goto configured_rate;
1965 } else if (rack->rack_no_prr &&
1966 (rack->r_ctl.rack_per_of_gp_rec > 100)) {
1967 /* No PRR, let's just use the b/w estimate only */
1971 * Here we may have a non-retransmit but we
1972 * have no overrides, so just use the recovery
1973 * rate (prr is in effect).
1975 return (rack->r_ctl.rack_per_of_gp_rec);
1979 /* For the configured rate we look at our cwnd vs the ssthresh */
1980 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
1981 return (rack->r_ctl.rack_per_of_gp_ss);
1983 return (rack->r_ctl.rack_per_of_gp_ca);
1987 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6)
1990 * Types of logs (mod value)
1991 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit.
1992 * 2 = a dsack round begins, persist is reset to 16.
1993 * 3 = a dsack round ends
1994 * 4 = DSACK option increases rack rtt; flex5 is the srtt input, flex6 is thresh
1995 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack
1996 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh.
1998 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1999 union tcp_log_stackspecific log;
2002 memset(&log, 0, sizeof(log));
2003 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based;
2004 log.u_bbr.flex1 <<= 1;
2005 log.u_bbr.flex1 |= rack->rc_rack_use_dsack;
2006 log.u_bbr.flex1 <<= 1;
2007 log.u_bbr.flex1 |= rack->rc_dsack_round_seen;
2008 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end;
2009 log.u_bbr.flex3 = rack->r_ctl.num_dsack;
2010 log.u_bbr.flex4 = flex4;
2011 log.u_bbr.flex5 = flex5;
2012 log.u_bbr.flex6 = flex6;
2013 log.u_bbr.flex7 = rack->r_ctl.dsack_persist;
2014 log.u_bbr.flex8 = mod;
2015 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2016 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2017 &rack->rc_inp->inp_socket->so_rcv,
2018 &rack->rc_inp->inp_socket->so_snd,
2019 RACK_DSACK_HANDLING, 0,
2020 0, &log, false, &tv);
2025 rack_log_hdwr_pacing(struct tcp_rack *rack,
2026 uint64_t rate, uint64_t hw_rate, int line,
2027 int error, uint16_t mod)
2029 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2030 union tcp_log_stackspecific log;
2032 const struct ifnet *ifp;
2034 memset(&log, 0, sizeof(log));
2035 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff);
2036 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff);
2037 if (rack->r_ctl.crte) {
2038 ifp = rack->r_ctl.crte->ptbl->rs_ifp;
2039 } else if (rack->rc_inp->inp_route.ro_nh &&
2040 rack->rc_inp->inp_route.ro_nh->nh_ifp) {
2041 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp;
2045 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff);
2046 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff);
2048 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2049 log.u_bbr.bw_inuse = rate;
2050 log.u_bbr.flex5 = line;
2051 log.u_bbr.flex6 = error;
2052 log.u_bbr.flex7 = mod;
2053 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs;
2054 log.u_bbr.flex8 = rack->use_fixed_rate;
2055 log.u_bbr.flex8 <<= 1;
2056 log.u_bbr.flex8 |= rack->rack_hdrw_pacing;
2057 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
2058 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate;
2059 if (rack->r_ctl.crte)
2060 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate;
2062 log.u_bbr.cur_del_rate = 0;
2063 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req;
2064 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2065 &rack->rc_inp->inp_socket->so_rcv,
2066 &rack->rc_inp->inp_socket->so_snd,
2067 BBR_LOG_HDWR_PACE, 0,
2068 0, &log, false, &tv);
2073 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped)
2076 * We allow rack_per_of_gp_xx to dictate our bw rate we want.
2078 uint64_t bw_est, high_rate;
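/*
 * The gain is expressed as a percentage, so the target pacing rate
 * is bw * gain / 100, floored at RACK_MIN_BW just below.
 */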
2081 gain = (uint64_t)rack_get_output_gain(rack, rsm);
2083 bw_est /= (uint64_t)100;
2084 /* Never fall below the minimum (def 64kbps) */
2085 if (bw_est < RACK_MIN_BW)
2086 bw_est = RACK_MIN_BW;
2087 if (rack->r_rack_hw_rate_caps) {
2088 /* Rate caps are in place */
2089 if (rack->r_ctl.crte != NULL) {
2090 /* We have a hdwr rate already */
2091 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte);
2092 if (bw_est >= high_rate) {
2093 /* We are capping bw at the highest rate table entry */
2094 rack_log_hdwr_pacing(rack,
2095 bw_est, high_rate, __LINE__,
2101 } else if ((rack->rack_hdrw_pacing == 0) &&
2102 (rack->rack_hdw_pace_ena) &&
2103 (rack->rack_attempt_hdwr_pace == 0) &&
2104 (rack->rc_inp->inp_route.ro_nh != NULL) &&
2105 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
2107 * Special case, we have not yet attempted hardware
2108 * pacing, and yet we may, when we do, find out if we are
2109 * above the highest rate. We need to know the maxbw for the interface
2110 * in question (if it supports ratelimiting). We get back
2111 * a 0, if the interface is not found in the RL lists.
2113 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp);
2115 /* Yep, we have a rate; is it above this rate? */
2116 if (bw_est > high_rate) {
2128 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod)
2130 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2131 union tcp_log_stackspecific log;
2134 if ((mod != 1) && (rack_verbose_logging == 0)) {
2136 * We get 3 values currently for mod
2137 * 1 - We are retransmitting and this tells the reason.
2138 * 2 - We are clearing a dup-ack count.
2139 * 3 - We are incrementing a dup-ack count.
2141 * The clear/increment are only logged
2142 * if you have BBverbose on.
2146 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2147 log.u_bbr.flex1 = tsused;
2148 log.u_bbr.flex2 = thresh;
2149 log.u_bbr.flex3 = rsm->r_flags;
2150 log.u_bbr.flex4 = rsm->r_dupack;
2151 log.u_bbr.flex5 = rsm->r_start;
2152 log.u_bbr.flex6 = rsm->r_end;
2153 log.u_bbr.flex8 = mod;
2154 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2155 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2156 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2157 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2158 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2159 log.u_bbr.pacing_gain = rack->r_must_retran;
2160 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2161 &rack->rc_inp->inp_socket->so_rcv,
2162 &rack->rc_inp->inp_socket->so_snd,
2163 BBR_LOG_SETTINGS_CHG, 0,
2164 0, &log, false, &tv);
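/*
 * rack_log_to_start() records a timer being armed: "which" identifies
 * the timer type, "to" the timeout and "slot" the pacing slot, along
 * with the current srtt, rxt shift and persist state.
 */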
2169 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
2171 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2172 union tcp_log_stackspecific log;
2175 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2176 log.u_bbr.flex1 = rack->rc_tp->t_srtt;
2177 log.u_bbr.flex2 = to;
2178 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
2179 log.u_bbr.flex4 = slot;
2180 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot;
2181 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2182 log.u_bbr.flex7 = rack->rc_in_persist;
2183 log.u_bbr.flex8 = which;
2184 if (rack->rack_no_prr)
2185 log.u_bbr.pkts_out = 0;
2187 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
2188 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2189 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2190 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2191 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2192 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2193 log.u_bbr.pacing_gain = rack->r_must_retran;
2194 log.u_bbr.cwnd_gain = rack->rc_has_collapsed;
2195 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift;
2196 log.u_bbr.lost = rack_rto_min;
2197 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2198 &rack->rc_inp->inp_socket->so_rcv,
2199 &rack->rc_inp->inp_socket->so_snd,
2200 BBR_LOG_TIMERSTAR, 0,
2201 0, &log, false, &tv);
2206 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm)
2208 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2209 union tcp_log_stackspecific log;
2212 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2213 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2214 log.u_bbr.flex8 = to_num;
2215 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt;
2216 log.u_bbr.flex2 = rack->rc_rack_rtt;
2218 log.u_bbr.flex3 = 0;
2220 log.u_bbr.flex3 = rsm->r_end - rsm->r_start;
2221 if (rack->rack_no_prr)
2222 log.u_bbr.flex5 = 0;
2224 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2225 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2226 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2227 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2228 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2229 log.u_bbr.pacing_gain = rack->r_must_retran;
2230 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2231 &rack->rc_inp->inp_socket->so_rcv,
2232 &rack->rc_inp->inp_socket->so_snd,
2234 0, &log, false, &tv);
2239 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack,
2240 struct rack_sendmap *prev,
2241 struct rack_sendmap *rsm,
2242 struct rack_sendmap *next,
2243 int flag, uint32_t th_ack, int line)
2245 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
2246 union tcp_log_stackspecific log;
2249 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2250 log.u_bbr.flex8 = flag;
2251 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2252 log.u_bbr.cur_del_rate = (uint64_t)prev;
2253 log.u_bbr.delRate = (uint64_t)rsm;
2254 log.u_bbr.rttProp = (uint64_t)next;
2255 log.u_bbr.flex7 = 0;
2257 log.u_bbr.flex1 = prev->r_start;
2258 log.u_bbr.flex2 = prev->r_end;
2259 log.u_bbr.flex7 |= 0x4;
2262 log.u_bbr.flex3 = rsm->r_start;
2263 log.u_bbr.flex4 = rsm->r_end;
2264 log.u_bbr.flex7 |= 0x2;
2267 log.u_bbr.flex5 = next->r_start;
2268 log.u_bbr.flex6 = next->r_end;
2269 log.u_bbr.flex7 |= 0x1;
2271 log.u_bbr.applimited = line;
2272 log.u_bbr.pkts_out = th_ack;
2273 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2274 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2275 if (rack->rack_no_prr)
2278 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt;
2279 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2280 &rack->rc_inp->inp_socket->so_rcv,
2281 &rack->rc_inp->inp_socket->so_snd,
2283 0, &log, false, &tv);
2288 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len,
2289 struct rack_sendmap *rsm, int conf)
2291 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
2292 union tcp_log_stackspecific log;
2294 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2295 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2296 log.u_bbr.flex1 = t;
2297 log.u_bbr.flex2 = len;
2298 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt;
2299 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest;
2300 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest;
2301 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt;
2302 log.u_bbr.flex7 = conf;
2303 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot;
2304 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method;
2305 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2306 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt;
2307 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags;
2308 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2310 log.u_bbr.pkt_epoch = rsm->r_start;
2311 log.u_bbr.lost = rsm->r_end;
2312 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt;
2313 /* We lose any bits above the lower 24 bits */
2314 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags;
2317 log.u_bbr.pkt_epoch = rack->rc_tp->iss;
2319 log.u_bbr.cwnd_gain = 0;
2320 log.u_bbr.pacing_gain = 0;
2322 /* Write out general bits of interest rrs here */
2323 log.u_bbr.use_lt_bw = rack->rc_highly_buffered;
2324 log.u_bbr.use_lt_bw <<= 1;
2325 log.u_bbr.use_lt_bw |= rack->forced_ack;
2326 log.u_bbr.use_lt_bw <<= 1;
2327 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul;
2328 log.u_bbr.use_lt_bw <<= 1;
2329 log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
2330 log.u_bbr.use_lt_bw <<= 1;
2331 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
2332 log.u_bbr.use_lt_bw <<= 1;
2333 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
2334 log.u_bbr.use_lt_bw <<= 1;
2335 log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
2336 log.u_bbr.use_lt_bw <<= 1;
2337 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom;
2338 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight;
2339 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts;
2340 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered;
2341 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts;
2342 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt;
2343 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
2344 log.u_bbr.bw_inuse <<= 32;
2346 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]);
2347 TCP_LOG_EVENTP(tp, NULL,
2348 &rack->rc_inp->inp_socket->so_rcv,
2349 &rack->rc_inp->inp_socket->so_snd,
2351 0, &log, false, &tv);
2358 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt)
2361 * Log the rtt sample we are
2362 * applying to the srtt algorithm in
2365 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2366 union tcp_log_stackspecific log;
2369 /* Convert our ms to a microsecond */
2370 memset(&log, 0, sizeof(log));
2371 log.u_bbr.flex1 = rtt;
2372 log.u_bbr.flex2 = rack->r_ctl.ack_count;
2373 log.u_bbr.flex3 = rack->r_ctl.sack_count;
2374 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
2375 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra;
2376 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2377 log.u_bbr.flex7 = 1;
2378 log.u_bbr.flex8 = rack->sack_attack_disable;
2379 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2380 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2381 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2382 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2383 log.u_bbr.pacing_gain = rack->r_must_retran;
2385 * We capture in delRate the upper 32 bits as
2386 * the confidence level we had declared, and the
2387 * lower 32 bits as the actual RTT using the arrival
2390 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence;
2391 log.u_bbr.delRate <<= 32;
2392 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt;
2393 /* Let's capture all the things that make up t_rxtcur */
2394 log.u_bbr.applimited = rack_rto_min;
2395 log.u_bbr.epoch = rack_rto_max;
2396 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop;
2397 log.u_bbr.lost = rack_rto_min;
2398 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop);
2399 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp);
2400 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec;
2401 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC;
2402 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec;
2403 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2404 &rack->rc_inp->inp_socket->so_rcv,
2405 &rack->rc_inp->inp_socket->so_snd,
2407 0, &log, false, &tv);
2412 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where)
2414 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
2415 union tcp_log_stackspecific log;
2418 /* Convert our ms to a microsecond */
2419 memset(&log, 0, sizeof(log));
2420 log.u_bbr.flex1 = rtt;
2421 log.u_bbr.flex2 = send_time;
2422 log.u_bbr.flex3 = ack_time;
2423 log.u_bbr.flex4 = where;
2424 log.u_bbr.flex7 = 2;
2425 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2426 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2427 &rack->rc_inp->inp_socket->so_rcv,
2428 &rack->rc_inp->inp_socket->so_snd,
2430 0, &log, false, &tv);
2437 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line)
2439 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
2440 union tcp_log_stackspecific log;
2443 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2444 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2445 log.u_bbr.flex1 = line;
2446 log.u_bbr.flex2 = tick;
2447 log.u_bbr.flex3 = tp->t_maxunacktime;
2448 log.u_bbr.flex4 = tp->t_acktime;
2449 log.u_bbr.flex8 = event;
2450 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2451 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2452 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2453 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2454 log.u_bbr.pacing_gain = rack->r_must_retran;
2455 TCP_LOG_EVENTP(tp, NULL,
2456 &rack->rc_inp->inp_socket->so_rcv,
2457 &rack->rc_inp->inp_socket->so_snd,
2458 BBR_LOG_PROGRESS, 0,
2459 0, &log, false, &tv);
2464 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv)
2466 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2467 union tcp_log_stackspecific log;
2469 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2470 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2471 log.u_bbr.flex1 = slot;
2472 if (rack->rack_no_prr)
2473 log.u_bbr.flex2 = 0;
2475 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt;
2476 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags);
2477 log.u_bbr.flex8 = rack->rc_in_persist;
2478 log.u_bbr.timeStamp = cts;
2479 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2480 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2481 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2482 log.u_bbr.pacing_gain = rack->r_must_retran;
2483 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2484 &rack->rc_inp->inp_socket->so_rcv,
2485 &rack->rc_inp->inp_socket->so_snd,
2487 0, &log, false, tv);
2492 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs)
2494 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2495 union tcp_log_stackspecific log;
2498 memset(&log, 0, sizeof(log));
2499 log.u_bbr.flex1 = did_out;
2500 log.u_bbr.flex2 = nxt_pkt;
2501 log.u_bbr.flex3 = way_out;
2502 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
2503 if (rack->rack_no_prr)
2504 log.u_bbr.flex5 = 0;
2506 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2507 log.u_bbr.flex6 = nsegs;
2508 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs;
2509 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */
2510 log.u_bbr.flex7 <<= 1;
2511 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */
2512 log.u_bbr.flex7 <<= 1;
2513 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */
2514 log.u_bbr.flex8 = rack->rc_in_persist;
2515 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2516 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2517 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2518 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
2519 log.u_bbr.use_lt_bw <<= 1;
2520 log.u_bbr.use_lt_bw |= rack->r_might_revert;
2521 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2522 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2523 log.u_bbr.pacing_gain = rack->r_must_retran;
2524 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2525 &rack->rc_inp->inp_socket->so_rcv,
2526 &rack->rc_inp->inp_socket->so_snd,
2527 BBR_LOG_DOSEG_DONE, 0,
2528 0, &log, false, &tv);
2533 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm)
2535 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
2536 union tcp_log_stackspecific log;
2539 memset(&log, 0, sizeof(log));
2540 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs;
2541 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
2542 log.u_bbr.flex4 = arg1;
2543 log.u_bbr.flex5 = arg2;
2544 log.u_bbr.flex6 = arg3;
2545 log.u_bbr.flex8 = frm;
2546 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2547 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2548 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2549 log.u_bbr.applimited = rack->r_ctl.rc_sacked;
2550 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2551 log.u_bbr.pacing_gain = rack->r_must_retran;
2552 TCP_LOG_EVENTP(tp, NULL,
2553 &tp->t_inpcb->inp_socket->so_rcv,
2554 &tp->t_inpcb->inp_socket->so_snd,
2555 TCP_HDWR_PACE_SIZE, 0,
2556 0, &log, false, &tv);
2561 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot,
2562 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use)
2564 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2565 union tcp_log_stackspecific log;
2568 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2569 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2570 log.u_bbr.flex1 = slot;
2571 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
2572 log.u_bbr.flex4 = reason;
2573 if (rack->rack_no_prr)
2574 log.u_bbr.flex5 = 0;
2576 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2577 log.u_bbr.flex7 = hpts_calling;
2578 log.u_bbr.flex8 = rack->rc_in_persist;
2579 log.u_bbr.lt_epoch = cwnd_to_use;
2580 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2581 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2582 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2583 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2584 log.u_bbr.pacing_gain = rack->r_must_retran;
2585 log.u_bbr.cwnd_gain = rack->rc_has_collapsed;
2586 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2587 &rack->rc_inp->inp_socket->so_rcv,
2588 &rack->rc_inp->inp_socket->so_snd,
2590 tlen, &log, false, &tv);
2595 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts,
2596 struct timeval *tv, uint32_t flags_on_entry)
2598 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2599 union tcp_log_stackspecific log;
2601 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2602 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2603 log.u_bbr.flex1 = line;
2604 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to;
2605 log.u_bbr.flex3 = flags_on_entry;
2606 log.u_bbr.flex4 = us_cts;
2607 if (rack->rack_no_prr)
2608 log.u_bbr.flex5 = 0;
2610 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2611 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2612 log.u_bbr.flex7 = hpts_removed;
2613 log.u_bbr.flex8 = 1;
2614 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags;
2615 log.u_bbr.timeStamp = us_cts;
2616 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2617 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2618 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2619 log.u_bbr.pacing_gain = rack->r_must_retran;
2620 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2621 &rack->rc_inp->inp_socket->so_rcv,
2622 &rack->rc_inp->inp_socket->so_snd,
2623 BBR_LOG_TIMERCANC, 0,
2624 0, &log, false, tv);
2629 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
2630 uint32_t flex1, uint32_t flex2,
2631 uint32_t flex3, uint32_t flex4,
2632 uint32_t flex5, uint32_t flex6,
2633 uint16_t flex7, uint8_t mod)
2635 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2636 union tcp_log_stackspecific log;
2640 /* No, you can't use 1; it's for the real timeout cancel */
2643 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2644 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2645 log.u_bbr.flex1 = flex1;
2646 log.u_bbr.flex2 = flex2;
2647 log.u_bbr.flex3 = flex3;
2648 log.u_bbr.flex4 = flex4;
2649 log.u_bbr.flex5 = flex5;
2650 log.u_bbr.flex6 = flex6;
2651 log.u_bbr.flex7 = flex7;
2652 log.u_bbr.flex8 = mod;
2653 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2654 &rack->rc_inp->inp_socket->so_rcv,
2655 &rack->rc_inp->inp_socket->so_snd,
2656 BBR_LOG_TIMERCANC, 0,
2657 0, &log, false, &tv);
2662 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers)
2664 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2665 union tcp_log_stackspecific log;
2668 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2669 log.u_bbr.flex1 = timers;
2670 log.u_bbr.flex2 = ret;
2671 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp;
2672 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
2673 log.u_bbr.flex5 = cts;
2674 if (rack->rack_no_prr)
2675 log.u_bbr.flex6 = 0;
2677 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt;
2678 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2679 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2680 log.u_bbr.pacing_gain = rack->r_must_retran;
2681 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2682 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2683 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2684 &rack->rc_inp->inp_socket->so_rcv,
2685 &rack->rc_inp->inp_socket->so_snd,
2686 BBR_LOG_TO_PROCESS, 0,
2687 0, &log, false, &tv);
2692 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line)
2694 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2695 union tcp_log_stackspecific log;
2698 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2699 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out;
2700 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs;
2701 if (rack->rack_no_prr)
2702 log.u_bbr.flex3 = 0;
2704 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt;
2705 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered;
2706 log.u_bbr.flex5 = rack->r_ctl.rc_sacked;
2707 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt;
2708 log.u_bbr.flex7 = line;
2709 log.u_bbr.flex8 = frm;
2710 log.u_bbr.pkts_out = orig_cwnd;
2711 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2712 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2713 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
2714 log.u_bbr.use_lt_bw <<= 1;
2715 log.u_bbr.use_lt_bw |= rack->r_might_revert;
2716 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2717 &rack->rc_inp->inp_socket->so_rcv,
2718 &rack->rc_inp->inp_socket->so_snd,
2720 0, &log, false, &tv);
2724 #ifdef NETFLIX_EXP_DETECTION
2726 rack_log_sad(struct tcp_rack *rack, int event)
2728 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2729 union tcp_log_stackspecific log;
2732 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2733 log.u_bbr.flex1 = rack->r_ctl.sack_count;
2734 log.u_bbr.flex2 = rack->r_ctl.ack_count;
2735 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra;
2736 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
2737 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced;
2738 log.u_bbr.flex6 = tcp_sack_to_ack_thresh;
2739 log.u_bbr.pkts_out = tcp_sack_to_move_thresh;
2740 log.u_bbr.lt_epoch = (tcp_force_detection << 8);
2741 log.u_bbr.lt_epoch |= rack->do_detection;
2742 log.u_bbr.applimited = tcp_map_minimum;
2743 log.u_bbr.flex7 = rack->sack_attack_disable;
2744 log.u_bbr.flex8 = event;
2745 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2746 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2747 log.u_bbr.delivered = tcp_sad_decay_val;
2748 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2749 &rack->rc_inp->inp_socket->so_rcv,
2750 &rack->rc_inp->inp_socket->so_snd,
2751 TCP_SAD_DETECTION, 0,
2752 0, &log, false, &tv);
2758 rack_counter_destroy(void)
2760 counter_u64_free(rack_fto_send);
2761 counter_u64_free(rack_fto_rsm_send);
2762 counter_u64_free(rack_nfto_resend);
2763 counter_u64_free(rack_hw_pace_init_fail);
2764 counter_u64_free(rack_hw_pace_lost);
2765 counter_u64_free(rack_non_fto_send);
2766 counter_u64_free(rack_extended_rfo);
2767 counter_u64_free(rack_ack_total);
2768 counter_u64_free(rack_express_sack);
2769 counter_u64_free(rack_sack_total);
2770 counter_u64_free(rack_move_none);
2771 counter_u64_free(rack_move_some);
2772 counter_u64_free(rack_sack_attacks_detected);
2773 counter_u64_free(rack_sack_attacks_reversed);
2774 counter_u64_free(rack_sack_used_next_merge);
2775 counter_u64_free(rack_sack_used_prev_merge);
2776 counter_u64_free(rack_tlp_tot);
2777 counter_u64_free(rack_tlp_newdata);
2778 counter_u64_free(rack_tlp_retran);
2779 counter_u64_free(rack_tlp_retran_bytes);
2780 counter_u64_free(rack_to_tot);
2781 counter_u64_free(rack_saw_enobuf);
2782 counter_u64_free(rack_saw_enobuf_hw);
2783 counter_u64_free(rack_saw_enetunreach);
2784 counter_u64_free(rack_hot_alloc);
2785 counter_u64_free(rack_to_alloc);
2786 counter_u64_free(rack_to_alloc_hard);
2787 counter_u64_free(rack_to_alloc_emerg);
2788 counter_u64_free(rack_to_alloc_limited);
2789 counter_u64_free(rack_alloc_limited_conns);
2790 counter_u64_free(rack_split_limited);
2791 counter_u64_free(rack_multi_single_eq);
2792 counter_u64_free(rack_proc_non_comp_ack);
2793 counter_u64_free(rack_sack_proc_all);
2794 counter_u64_free(rack_sack_proc_restart);
2795 counter_u64_free(rack_sack_proc_short);
2796 counter_u64_free(rack_sack_skipped_acked);
2797 counter_u64_free(rack_sack_splits);
2798 counter_u64_free(rack_input_idle_reduces);
2799 counter_u64_free(rack_collapsed_win);
2800 counter_u64_free(rack_collapsed_win_rxt);
2801 counter_u64_free(rack_collapsed_win_rxt_bytes);
2802 counter_u64_free(rack_collapsed_win_seen);
2803 counter_u64_free(rack_try_scwnd);
2804 counter_u64_free(rack_persists_sends);
2805 counter_u64_free(rack_persists_acks);
2806 counter_u64_free(rack_persists_loss);
2807 counter_u64_free(rack_persists_lost_ends);
2809 counter_u64_free(rack_adjust_map_bw);
2811 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE);
2812 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE);
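/*
 * Sendmap allocation: rack_alloc() below prefers the per-connection
 * free list (the most recently freed, "hot" entries) and only asks
 * the UMA zone for a fresh block when that cache runs low.
 */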
2815 static struct rack_sendmap *
2816 rack_alloc(struct tcp_rack *rack)
2818 struct rack_sendmap *rsm;
2821 * First get the top of the list; in
2822 * theory it is the "hottest" rsm we have,
2823 * possibly just freed by ack processing.
2825 if (rack->rc_free_cnt > rack_free_cache) {
2826 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
2827 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
2828 counter_u64_add(rack_hot_alloc, 1);
2829 rack->rc_free_cnt--;
2833 * Once we get under our free cache we probably
2834 * no longer have a "hot" one available. Lets
2837 rsm = uma_zalloc(rack_zone, M_NOWAIT);
2839 rack->r_ctl.rc_num_maps_alloced++;
2840 counter_u64_add(rack_to_alloc, 1);
2844 * Dig in to our aux rsm's (the last two) since
2845 * UMA failed to get us one.
2847 if (rack->rc_free_cnt) {
2848 counter_u64_add(rack_to_alloc_emerg, 1);
2849 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
2850 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
2851 rack->rc_free_cnt--;
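/*
 * rack_alloc_full_limit() is the allocation path that honors the
 * global sendmap entry limit: when the connection is at or over
 * V_tcp_map_entries_limit (and attack detection is off) the attempt
 * is only counted as limited; otherwise it falls through to
 * rack_alloc().
 */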
2857 static struct rack_sendmap *
2858 rack_alloc_full_limit(struct tcp_rack *rack)
2860 if ((V_tcp_map_entries_limit > 0) &&
2861 (rack->do_detection == 0) &&
2862 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
2863 counter_u64_add(rack_to_alloc_limited, 1);
2864 if (!rack->alloc_limit_reported) {
2865 rack->alloc_limit_reported = 1;
2866 counter_u64_add(rack_alloc_limited_conns, 1);
2870 return (rack_alloc(rack));
2873 /* wrapper to allocate a sendmap entry, subject to a specific limit */
2874 static struct rack_sendmap *
2875 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type)
2877 struct rack_sendmap *rsm;
2880 /* currently there is only one limit type */
2881 if (V_tcp_map_split_limit > 0 &&
2882 (rack->do_detection == 0) &&
2883 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) {
2884 counter_u64_add(rack_split_limited, 1);
2885 if (!rack->alloc_limit_reported) {
2886 rack->alloc_limit_reported = 1;
2887 counter_u64_add(rack_alloc_limited_conns, 1);
2893 /* allocate and mark in the limit type, if set */
2894 rsm = rack_alloc(rack);
2895 if (rsm != NULL && limit_type) {
2896 rsm->r_limit_type = limit_type;
2897 rack->r_ctl.rc_num_split_allocs++;
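/*
 * rack_free() returns a sendmap block to the stack's free list,
 * clearing any cached pointers (first_appl, resend, end_appl,
 * tlpsend, sacklast) that still reference it.
 */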
2903 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
2905 if (rsm->r_flags & RACK_APP_LIMITED) {
2906 if (rack->r_ctl.rc_app_limited_cnt > 0) {
2907 rack->r_ctl.rc_app_limited_cnt--;
2910 if (rsm->r_limit_type) {
2911 /* currently there is only one limit type */
2912 rack->r_ctl.rc_num_split_allocs--;
2914 if (rsm == rack->r_ctl.rc_first_appl) {
2915 if (rack->r_ctl.rc_app_limited_cnt == 0)
2916 rack->r_ctl.rc_first_appl = NULL;
2918 /* Follow the next one out */
2919 struct rack_sendmap fe;
2921 fe.r_start = rsm->r_nseq_appl;
2922 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
2925 if (rsm == rack->r_ctl.rc_resend)
2926 rack->r_ctl.rc_resend = NULL;
2927 if (rsm == rack->r_ctl.rc_end_appl)
2928 rack->r_ctl.rc_end_appl = NULL;
2929 if (rack->r_ctl.rc_tlpsend == rsm)
2930 rack->r_ctl.rc_tlpsend = NULL;
2931 if (rack->r_ctl.rc_sacklast == rsm)
2932 rack->r_ctl.rc_sacklast = NULL;
2933 memset(rsm, 0, sizeof(struct rack_sendmap));
2934 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext);
2935 rack->rc_free_cnt++;
2939 rack_free_trim(struct tcp_rack *rack)
2941 struct rack_sendmap *rsm;
2944 * Free up all the tail entries until
2945 * we get our list down to the limit.
2947 while (rack->rc_free_cnt > rack_free_cache) {
2948 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head);
2949 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
2950 rack->rc_free_cnt--;
2951 uma_zfree(rack_zone, rsm);
2957 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack)
2959 uint64_t srtt, bw, len, tim;
2960 uint32_t segsiz, def_len, minl;
2962 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
2963 def_len = rack_def_data_window * segsiz;
2964 if (rack->rc_gp_filled == 0) {
2966 * We have no measurement (IW is in flight?) so
2967 * we can only guess using our data_window sysctl
2968 * value (usually 20MSS).
2973 * Now we have a number of factors to consider.
2975 * 1) We have a desired BDP which is usually
2977 * 2) We have a minimum number of rtt's usually 1 SRTT
2978 * but we allow it to be more.
2979 * 3) We want to make sure a measurement lasts N useconds (if
2980 * we have set rack_min_measure_usec).
2982 * We handle the first concern here by trying to create a data
2983 * window of max(rack_def_data_window, DesiredBDP). The
2984 * second concern we handle in not letting the measurement
2985 * window end normally until at least the required SRTT's
2986 * have gone by which is done further below in
2987 * rack_enough_for_measurement(). Finally the third concern
2988 * we also handle here by calculating how long that time
2989 * would take at the current BW and then return the
2990 * max of our first calculation and that length. Note
2991 * that if rack_min_measure_usec is 0, we don't deal
2992 * with concern 3. Also for both Concern 1 and 3 an
2993 * application limited period could end the measurement
2996 * So let's calculate the BDP with the "known" b/w using
2997 * the SRTT as our rtt and then multiply it by the
3000 bw = rack_get_bw(rack);
3001 srtt = (uint64_t)tp->t_srtt;
3003 len /= (uint64_t)HPTS_USEC_IN_SEC;
3004 len *= max(1, rack_goal_bdp);
3005 /* Now we need to round up to the nearest MSS */
3006 len = roundup(len, segsiz);
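/*
 * Example with made-up numbers: at ~1.25 MB/s and a 40000 usec SRTT
 * the BDP is about 50000 bytes; with rack_goal_bdp at 2 that gives a
 * ~100000 byte measurement window, rounded up to a segment multiple.
 */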
3007 if (rack_min_measure_usec) {
3008 /* Now calculate our min length for this b/w */
3009 tim = rack_min_measure_usec;
3010 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC;
3013 minl = roundup(minl, segsiz);
3018 * Now if we have a very small window we want
3019 * to attempt to get the window that is
3020 * as small as possible. This happens on
3021 * low b/w connections and we don't want to
3022 * span huge numbers of rtt's between measurements.
3024 * We basically include 2 over our "MIN window" so
3025 * that the measurement can be shortened (possibly) by
3029 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz)));
3031 return (max((uint32_t)len, def_len));
3036 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality)
3038 uint32_t tim, srtts, segsiz;
3041 * Has enough time passed for the GP measurement to be valid?
3043 if ((tp->snd_max == tp->snd_una) ||
3044 (th_ack == tp->snd_max)){
3046 *quality = RACK_QUALITY_ALLACKED;
3049 if (SEQ_LT(th_ack, tp->gput_seq)) {
3050 /* Not enough bytes yet */
3053 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
3054 if (SEQ_LT(th_ack, tp->gput_ack) &&
3055 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
3056 /* Not enough bytes yet */
3059 if (rack->r_ctl.rc_first_appl &&
3060 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) {
3062 * We are up to the app limited send point;
3063 * we have to measure irrespective of the time.
3065 *quality = RACK_QUALITY_APPLIMITED;
3068 /* Now what about time? */
3069 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts);
3070 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts;
3072 *quality = RACK_QUALITY_HIGH;
3075 /* Nope not even a full SRTT has passed */
3080 rack_log_timely(struct tcp_rack *rack,
3081 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd,
3082 uint64_t up_bnd, int line, uint8_t method)
3084 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
3085 union tcp_log_stackspecific log;
3088 memset(&log, 0, sizeof(log));
3089 log.u_bbr.flex1 = logged;
3090 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt;
3091 log.u_bbr.flex2 <<= 4;
3092 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt;
3093 log.u_bbr.flex2 <<= 4;
3094 log.u_bbr.flex2 |= rack->rc_gp_incr;
3095 log.u_bbr.flex2 <<= 4;
3096 log.u_bbr.flex2 |= rack->rc_gp_bwred;
3097 log.u_bbr.flex3 = rack->rc_gp_incr;
3098 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
3099 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca;
3100 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec;
3101 log.u_bbr.flex7 = rack->rc_gp_bwred;
3102 log.u_bbr.flex8 = method;
3103 log.u_bbr.cur_del_rate = cur_bw;
3104 log.u_bbr.delRate = low_bnd;
3105 log.u_bbr.bw_inuse = up_bnd;
3106 log.u_bbr.rttProp = rack_get_bw(rack);
3107 log.u_bbr.pkt_epoch = line;
3108 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
3109 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3110 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3111 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
3112 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
3113 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom;
3114 log.u_bbr.cwnd_gain <<= 1;
3115 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec;
3116 log.u_bbr.cwnd_gain <<= 1;
3117 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
3118 log.u_bbr.cwnd_gain <<= 1;
3119 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
3120 log.u_bbr.lost = rack->r_ctl.rc_loss_count;
3121 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3122 &rack->rc_inp->inp_socket->so_rcv,
3123 &rack->rc_inp->inp_socket->so_snd,
3125 0, &log, false, &tv);
3130 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult)
3133 * Before we increase we need to know if
3134 * the estimate just made was less than
3135 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est)
3137 * If we already are pacing at a fast enough
3138 * rate to push us faster there is no sense of
3141 * We first calculate our actual pacing rate (ss or ca multiplier
3142 * times our cur_bw).
3144 * Then we take the last measured rate and multiply by our
3145 * maximum pacing overage to give us a max allowable rate.
3147 * If our act_rate is smaller than our max_allowable rate
3148 * then we should increase. Else we should hold steady.
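 *
 * Rough worked example (illustrative numbers, taking the ~10%
 * rack_max_per_above headroom mentioned below): if cur_bw is
 * 10 Mb/s and the multiplier is 120%, our actual pacing rate is
 * 12 Mb/s. With last_bw_est at 11 Mb/s the allowable ceiling is
 * 11 * 1.10 = 12.1 Mb/s, so act_rate < max_allow_rate and a raise
 * is still allowed. Once last_bw_est stops growing, the ceiling
 * stops moving and further raises are held off.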
3151 uint64_t act_rate, max_allow_rate;
3153 if (rack_timely_no_stopping)
3156 if ((cur_bw == 0) || (last_bw_est == 0)) {
3158 * Initial startup case or
3159 * everything is acked case.
3161 rack_log_timely(rack, mult, cur_bw, 0, 0,
3167 * We can always pace at or slightly above our rate.
3169 rack_log_timely(rack, mult, cur_bw, 0, 0,
3173 act_rate = cur_bw * (uint64_t)mult;
3175 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100);
3176 max_allow_rate /= 100;
3177 if (act_rate < max_allow_rate) {
3179 * Here the rate we are actually pacing at
3180 * is smaller than 10% above our last measurement.
3181 * This means we are pacing below what we would
3182 * like to try to achieve (plus some wiggle room).
3184 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
3189 * Here we are already pacing at least rack_max_per_above (10%)
3190 * above what we are getting back. This indicates most likely
3191 * that we are being limited (cwnd/rwnd/app) and can't
3192 * get any more b/w. There is no sense in trying to
3193 * raise the pacing rate; it's not speeding us up
3194 * and we already are pacing faster than we are getting.
3196 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
3203 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack)
3206 * When we drag bottom, we want to ensure
3207 * that no multiplier is below 1.0; if one is,
3208 * we want to restore it to at least that.
3210 if (rack->r_ctl.rack_per_of_gp_rec < 100) {
3211 /* This is unlikely; we usually do not touch recovery */
3212 rack->r_ctl.rack_per_of_gp_rec = 100;
3214 if (rack->r_ctl.rack_per_of_gp_ca < 100) {
3215 rack->r_ctl.rack_per_of_gp_ca = 100;
3217 if (rack->r_ctl.rack_per_of_gp_ss < 100) {
3218 rack->r_ctl.rack_per_of_gp_ss = 100;
3223 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack)
3225 if (rack->r_ctl.rack_per_of_gp_ca > 100) {
3226 rack->r_ctl.rack_per_of_gp_ca = 100;
3228 if (rack->r_ctl.rack_per_of_gp_ss > 100) {
3229 rack->r_ctl.rack_per_of_gp_ss = 100;
3234 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override)
3236 int32_t calc, logged, plus;
3242 * override is passed when we are
3243 * losing b/w and making one last
3244 * gasp at trying to not lose out
3245 * to a new-reno flow.
3249 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */
3250 if (rack->rc_gp_incr &&
3251 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) {
3253 * Reset and get 5 strokes more before the boost. Note
3254 * that the count is 0 based so we have to add one.
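 *
 * Illustrative example: with a 2% per-pass increase step, reaching
 * RACK_TIMELY_CNT_BOOST consecutive increases turns this pass into
 * a step of 2% * RACK_TIMELY_CNT_BOOST (e.g. 10% for a boost count
 * of 5), after which the streak counter is reset and subsequent
 * passes return to the small step.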
3257 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST;
3258 rack->rc_gp_timely_inc_cnt = 0;
3260 plus = (uint32_t)rack_gp_increase_per;
3261 /* Must be at least 1% increase for true timely increases */
3263 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0)))
3265 if (rack->rc_gp_saw_rec &&
3266 (rack->rc_gp_no_rec_chg == 0) &&
3267 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3268 rack->r_ctl.rack_per_of_gp_rec)) {
3269 /* We have been in recovery ding it too */
3270 calc = rack->r_ctl.rack_per_of_gp_rec + plus;
3274 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc;
3275 if (rack_per_upper_bound_ss &&
3276 (rack->rc_dragged_bottom == 0) &&
3277 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss))
3278 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss;
3280 if (rack->rc_gp_saw_ca &&
3281 (rack->rc_gp_saw_ss == 0) &&
3282 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3283 rack->r_ctl.rack_per_of_gp_ca)) {
3285 calc = rack->r_ctl.rack_per_of_gp_ca + plus;
3289 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc;
3290 if (rack_per_upper_bound_ca &&
3291 (rack->rc_dragged_bottom == 0) &&
3292 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca))
3293 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca;
3295 if (rack->rc_gp_saw_ss &&
3296 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3297 rack->r_ctl.rack_per_of_gp_ss)) {
3299 calc = rack->r_ctl.rack_per_of_gp_ss + plus;
3302 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc;
3303 if (rack_per_upper_bound_ss &&
3304 (rack->rc_dragged_bottom == 0) &&
3305 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss))
3306 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss;
3310 (rack->rc_gp_incr == 0)){
3311 /* Go into increment mode */
3312 rack->rc_gp_incr = 1;
3313 rack->rc_gp_timely_inc_cnt = 0;
3315 if (rack->rc_gp_incr &&
3317 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) {
3318 rack->rc_gp_timely_inc_cnt++;
3320 rack_log_timely(rack, logged, plus, 0, 0,
3325 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff)
3328 * norm_grad = rtt_diff / minrtt;
3329 * new_per = curper * (1 - B * norm_grad)
3331 * B = rack_gp_decrease_per (default 10%)
3332 * rtt_diff = input var current rtt-diff
3333 * curper = input var current percentage
3334 * minrtt = from rack filter
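 *
 * Worked example (illustrative values, using the 10% B above):
 * with curper = 200, minrtt = 10000us and rtt_diff = 2000us the
 * normalized gradient is 0.2, so
 *    new_per = 200 * (1 - 0.10 * 0.2) = 196.
 * The computation below does the same thing in fixed point,
 * scaling by 1000000 to avoid floating point.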
3339 perf = (((uint64_t)curper * ((uint64_t)1000000 -
3340 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 *
3341 (((uint64_t)rtt_diff * (uint64_t)1000000)/
3342 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/
3343 (uint64_t)1000000)) /
3345 if (perf > curper) {
3349 return ((uint32_t)perf);
3353 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt)
3357 * result = curper * (1 - (B * (1 - highrttthresh/rtt)))
3360 * B = rack_gp_decrease_per (default 10%)
3361 * highrttthresh = filter_min * rack_gp_rtt_maxmul
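 *
 * Worked example (illustrative values): with filter_min = 10ms and
 * a maxmul of 3, highrttthresh = 30ms. If the current rtt is 60ms,
 * curper = 200 and B = 10%, then
 *    result = 200 * (1 - 0.10 * (1 - 30/60)) = 200 * 0.95 = 190.
 * The closer rtt is to highrttthresh the smaller the reduction;
 * at rtt == highrttthresh no reduction is taken by this path.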
3364 uint32_t highrttthresh;
3366 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
3368 perf = (((uint64_t)curper * ((uint64_t)1000000 -
3369 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 -
3370 ((uint64_t)highrttthresh * (uint64_t)1000000) /
3371 (uint64_t)rtt)) / 100)) /(uint64_t)1000000);
3376 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff)
3378 uint64_t logvar, logvar2, logvar3;
3379 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val;
3381 if (rack->rc_gp_incr) {
3382 /* Turn off increment counting */
3383 rack->rc_gp_incr = 0;
3384 rack->rc_gp_timely_inc_cnt = 0;
3386 ss_red = ca_red = rec_red = 0;
3388 /* Calculate the reduction value */
3392 /* Must be at least 1% reduction */
3393 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) {
3394 /* We have been in recovery ding it too */
3395 if (timely_says == 2) {
3396 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt);
3397 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
3403 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
3404 if (rack->r_ctl.rack_per_of_gp_rec > val) {
3405 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val);
3406 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val;
3408 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
3411 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec)
3412 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
3415 if (rack->rc_gp_saw_ss) {
3417 if (timely_says == 2) {
3418 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt);
3419 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
3425 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
3426 if (rack->r_ctl.rack_per_of_gp_ss > new_per) {
3427 ss_red = rack->r_ctl.rack_per_of_gp_ss - val;
3428 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val;
3431 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
3435 logvar2 = (uint32_t)rtt;
3437 logvar2 |= (uint32_t)rtt_diff;
3438 logvar3 = rack_gp_rtt_maxmul;
3440 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3441 rack_log_timely(rack, timely_says,
3443 logvar, __LINE__, 10);
3445 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss)
3446 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
3448 } else if (rack->rc_gp_saw_ca) {
3450 if (timely_says == 2) {
3451 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt);
3452 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
3458 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
3459 if (rack->r_ctl.rack_per_of_gp_ca > val) {
3460 ca_red = rack->r_ctl.rack_per_of_gp_ca - val;
3461 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val;
3463 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
3468 logvar2 = (uint32_t)rtt;
3470 logvar2 |= (uint32_t)rtt_diff;
3471 logvar3 = rack_gp_rtt_maxmul;
3473 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3474 rack_log_timely(rack, timely_says,
3476 logvar, __LINE__, 10);
3478 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca)
3479 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
3482 if (rack->rc_gp_timely_dec_cnt < 0x7) {
3483 rack->rc_gp_timely_dec_cnt++;
3484 if (rack_timely_dec_clear &&
3485 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear))
3486 rack->rc_gp_timely_dec_cnt = 0;
3491 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar,
3496 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts,
3497 uint32_t rtt, uint32_t line, uint8_t reas)
3499 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
3500 union tcp_log_stackspecific log;
3503 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3504 log.u_bbr.flex1 = line;
3505 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts;
3506 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts;
3507 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
3508 log.u_bbr.flex5 = rtt;
3509 log.u_bbr.flex6 = rack->rc_highly_buffered;
3510 log.u_bbr.flex6 <<= 1;
3511 log.u_bbr.flex6 |= rack->forced_ack;
3512 log.u_bbr.flex6 <<= 1;
3513 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul;
3514 log.u_bbr.flex6 <<= 1;
3515 log.u_bbr.flex6 |= rack->in_probe_rtt;
3516 log.u_bbr.flex6 <<= 1;
3517 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt;
3518 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt;
3519 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca;
3520 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec;
3521 log.u_bbr.flex8 = reas;
3522 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3523 log.u_bbr.delRate = rack_get_bw(rack);
3524 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt;
3525 log.u_bbr.cur_del_rate <<= 32;
3526 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt;
3527 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered;
3528 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
3529 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3530 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
3531 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
3532 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts;
3533 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight;
3534 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3535 log.u_bbr.rttProp = us_cts;
3536 log.u_bbr.rttProp <<= 32;
3537 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt;
3538 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3539 &rack->rc_inp->inp_socket->so_rcv,
3540 &rack->rc_inp->inp_socket->so_snd,
3541 BBR_LOG_RTT_SHRINKS, 0,
3542 0, &log, false, &rack->r_ctl.act_rcv_time);
3547 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt)
3551 bwdp = rack_get_bw(rack);
3552 bwdp *= (uint64_t)rtt;
3553 bwdp /= (uint64_t)HPTS_USEC_IN_SEC;
3554 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz);
3555 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) {
3557 * A window protocol must be able to have 4 packets
3558 * outstanding as the floor in order to function
3559 * (especially considering delayed ack :D).
3561 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs);
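/*
 * Worked example (illustrative values): at 100 Mb/s
 * (12,500,000 bytes/sec) with rtt = 40000us, bwdp is
 * 12,500,000 * 40000 / 1,000,000 = 500,000 bytes, which is then
 * rounded up to a segment boundary. Only for very small
 * bandwidth-delay products does the rack_timely_min_segs floor
 * above come into play.
 */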
3566 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts)
3569 * ProbeRTT is a bit different in rack_pacing than in
3570 * BBR. It is like BBR in that it uses the lowering of
3571 * the RTT as a signal that we saw something new and
3572 * counts from there for how long between. But it is
3573 * different in that it's quite simple. It does not
3574 * play with the cwnd and wait until we get down
3575 * to N segments outstanding and hold that for
3576 * 200ms. Instead it just sets the pacing reduction
3577 * rate to a set percentage (70 by default) and holds
3578 * that for a number of recent GP Srtt's.
3582 if (rack->rc_gp_dyn_mul == 0)
3585 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) {
3589 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
3590 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
3592 * Stop the goodput measurement now; the idea here is
3593 * that future measurements taken with in_probe_rtt
3594 * set won't register unless they are greater, so
3595 * we want to get what info (if any) is available
3598 rack_do_goodput_measurement(rack->rc_tp, rack,
3599 rack->rc_tp->snd_una, __LINE__,
3600 RACK_QUALITY_PROBERTT);
3602 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
3603 rack->r_ctl.rc_time_probertt_entered = us_cts;
3604 segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
3605 rack->r_ctl.rc_pace_min_segs);
3606 rack->in_probe_rtt = 1;
3607 rack->measure_saw_probe_rtt = 1;
3608 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
3609 rack->r_ctl.rc_time_probertt_starts = 0;
3610 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt;
3611 if (rack_probertt_use_min_rtt_entry)
3612 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
3614 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt);
3615 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3616 __LINE__, RACK_RTTS_ENTERPROBE);
3620 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts)
3622 struct rack_sendmap *rsm;
3625 segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
3626 rack->r_ctl.rc_pace_min_segs);
3627 rack->in_probe_rtt = 0;
3628 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
3629 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
3631 * Stop the goodput measurement now; the idea here is
3632 * that future measurements taken with in_probe_rtt
3633 * set won't register unless they are greater, so
3634 * we want to get what info (if any) is available
3637 rack_do_goodput_measurement(rack->rc_tp, rack,
3638 rack->rc_tp->snd_una, __LINE__,
3639 RACK_QUALITY_PROBERTT);
3640 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
3642 * We don't have enough data to make a measurement.
3643 * So let's just stop and start here after exiting
3644 * probe-rtt. We probably are not interested in
3645 * the results anyway.
3647 rack->rc_tp->t_flags &= ~TF_GPUTINPROG;
3650 * Measurements through the current snd_max are going
3651 * to be limited by the slower pacing rate.
3653 * We need to mark these as app-limited so we
3654 * don't collapse the b/w.
3656 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
3657 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
3658 if (rack->r_ctl.rc_app_limited_cnt == 0)
3659 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
3662 * Go out to the end app limited and mark
3663 * this new one as next and move the end_appl up
3666 if (rack->r_ctl.rc_end_appl)
3667 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
3668 rack->r_ctl.rc_end_appl = rsm;
3670 rsm->r_flags |= RACK_APP_LIMITED;
3671 rack->r_ctl.rc_app_limited_cnt++;
3674 * Now, we need to examine our pacing rate multipliers.
3675 * If it's under 100%, we need to kick it back up to
3676 * 100%. We also don't let it be over our "max" above
3677 * the actual rate i.e. 100% + rack_clamp_atexit_prtt.
3678 * Note setting clamp_atexit_prtt to 0 has the effect
3679 * of setting CA/SS to 100% always at exit (which is
3680 * the default behavior).
3682 if (rack_probertt_clear_is) {
3683 rack->rc_gp_incr = 0;
3684 rack->rc_gp_bwred = 0;
3685 rack->rc_gp_timely_inc_cnt = 0;
3686 rack->rc_gp_timely_dec_cnt = 0;
3688 /* Do we do any clamping at exit? */
3689 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) {
3690 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp;
3691 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp;
3693 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) {
3694 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt;
3695 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt;
3698 * Let's set rtt_diff to 0, so that we will get a "boost"
3701 rack->r_ctl.rc_rtt_diff = 0;
3703 /* Clear all flags so we start fresh */
3704 rack->rc_tp->t_bytes_acked = 0;
3705 rack->rc_tp->ccv->flags &= ~CCF_ABC_SENTAWND;
3707 * If configured to, set the cwnd and ssthresh to
3710 if (rack_probe_rtt_sets_cwnd) {
3714 /* Set ssthresh so we get into CA once we hit our target */
3715 if (rack_probertt_use_min_rtt_exit == 1) {
3716 /* Set to min rtt */
3717 rack_set_prtt_target(rack, segsiz,
3718 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
3719 } else if (rack_probertt_use_min_rtt_exit == 2) {
3720 /* Set to current gp rtt */
3721 rack_set_prtt_target(rack, segsiz,
3722 rack->r_ctl.rc_gp_srtt);
3723 } else if (rack_probertt_use_min_rtt_exit == 3) {
3724 /* Set to entry gp rtt */
3725 rack_set_prtt_target(rack, segsiz,
3726 rack->r_ctl.rc_entry_gp_rtt);
3731 sum = rack->r_ctl.rc_entry_gp_rtt;
3733 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt));
3736 * A highly buffered path needs
3737 * cwnd space for timely to work.
3738 * Lets set things up as if
3739 * we are heading back here again.
3741 setval = rack->r_ctl.rc_entry_gp_rtt;
3742 } else if (sum >= 15) {
3744 * Let's take the smaller of the
3745 * two since we are just somewhat
3748 setval = rack->r_ctl.rc_gp_srtt;
3749 if (setval > rack->r_ctl.rc_entry_gp_rtt)
3750 setval = rack->r_ctl.rc_entry_gp_rtt;
3753 * Here we are not highly buffered
3754 * and should pick the min we can to
3755 * keep from causing loss.
3757 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3759 rack_set_prtt_target(rack, segsiz,
3762 if (rack_probe_rtt_sets_cwnd > 1) {
3763 /* There is a percentage here to boost */
3764 ebdp = rack->r_ctl.rc_target_probertt_flight;
3765 ebdp *= rack_probe_rtt_sets_cwnd;
3767 setto = rack->r_ctl.rc_target_probertt_flight + ebdp;
3769 setto = rack->r_ctl.rc_target_probertt_flight;
3770 rack->rc_tp->snd_cwnd = roundup(setto, segsiz);
3771 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) {
3773 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs;
3775 /* If we set in the cwnd also set the ssthresh point so we are in CA */
3776 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1);
3778 rack_log_rtt_shrinks(rack, us_cts,
3779 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3780 __LINE__, RACK_RTTS_EXITPROBE);
3781 /* Clear times last so log has all the info */
3782 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max;
3783 rack->r_ctl.rc_time_probertt_entered = us_cts;
3784 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
3785 rack->r_ctl.rc_time_of_last_probertt = us_cts;
3789 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts)
3791 /* Check in on probe-rtt */
3792 if (rack->rc_gp_filled == 0) {
3793 /* We do not do p-rtt unless we have gp measurements */
3796 if (rack->in_probe_rtt) {
3797 uint64_t no_overflow;
3798 uint32_t endtime, must_stay;
3800 if (rack->r_ctl.rc_went_idle_time &&
3801 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) {
3803 * We went idle during prtt, just exit now.
3805 rack_exit_probertt(rack, us_cts);
3806 } else if (rack_probe_rtt_safety_val &&
3807 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) &&
3808 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) {
3810 * Probe RTT safety value triggered!
3812 rack_log_rtt_shrinks(rack, us_cts,
3813 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3814 __LINE__, RACK_RTTS_SAFETY);
3815 rack_exit_probertt(rack, us_cts);
3817 /* Calculate the max we will wait */
3818 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait);
3819 if (rack->rc_highly_buffered)
3820 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp);
3821 /* Calculate the min we must wait */
3822 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain);
3823 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) &&
3824 TSTMP_LT(us_cts, endtime)) {
3826 /* Do we lower more? */
3828 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered))
3829 calc = us_cts - rack->r_ctl.rc_time_probertt_entered;
3832 calc /= max(rack->r_ctl.rc_gp_srtt, 1);
3835 calc *= rack_per_of_gp_probertt_reduce;
3836 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc;
3838 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh)
3839 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh;
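/*
 * Worked example (illustrative values): if we entered probe-rtt
 * three gp_srtt's ago and the per-srtt reduce step is 10, the
 * pacing percentage drops from its probe-rtt starting value of 70
 * to 70 - (3 * 10) = 40, but never below rack_per_of_gp_lowthresh.
 */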
3841 /* We must reach target or the time set */
3844 if (rack->r_ctl.rc_time_probertt_starts == 0) {
3845 if ((TSTMP_LT(us_cts, must_stay) &&
3846 rack->rc_highly_buffered) ||
3847 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) >
3848 rack->r_ctl.rc_target_probertt_flight)) {
3849 /* We are not past the must_stay time */
3852 rack_log_rtt_shrinks(rack, us_cts,
3853 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3854 __LINE__, RACK_RTTS_REACHTARGET);
3855 rack->r_ctl.rc_time_probertt_starts = us_cts;
3856 if (rack->r_ctl.rc_time_probertt_starts == 0)
3857 rack->r_ctl.rc_time_probertt_starts = 1;
3858 /* Restore back to our rate we want to pace at in prtt */
3859 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
3862 * Setup our end time, some number of gp_srtts plus 200ms.
3864 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt *
3865 (uint64_t)rack_probertt_gpsrtt_cnt_mul);
3866 if (rack_probertt_gpsrtt_cnt_div)
3867 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div);
3870 endtime += rack_min_probertt_hold;
3871 endtime += rack->r_ctl.rc_time_probertt_starts;
3872 if (TSTMP_GEQ(us_cts, endtime)) {
3873 /* yes, exit probertt */
3874 rack_exit_probertt(rack, us_cts);
3877 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) {
3878 /* Go into probertt, it's been too long since we went lower */
3879 rack_enter_probertt(rack, us_cts);
3884 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est,
3885 uint32_t rtt, int32_t rtt_diff)
3887 uint64_t cur_bw, up_bnd, low_bnd, subfr;
3890 if ((rack->rc_gp_dyn_mul == 0) ||
3891 (rack->use_fixed_rate) ||
3892 (rack->in_probe_rtt) ||
3893 (rack->rc_always_pace == 0)) {
3894 /* No dynamic GP multiplier in play */
3897 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start;
3898 cur_bw = rack_get_bw(rack);
3899 /* Calculate our up and down range */
3900 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up;
3902 up_bnd += rack->r_ctl.last_gp_comp_bw;
3904 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down;
3906 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr;
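/*
 * Worked example (illustrative values): with last_gp_comp_bw at
 * 100 Mb/s, an up multiplier of 2% and a down multiplier of 4%,
 * up_bnd is 102 Mb/s and low_bnd is 96 Mb/s. A new estimate above
 * up_bnd is treated as a clear gain, one below low_bnd as losing
 * ground, and anything in between is left for timely to judge.
 */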
3907 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) {
3909 * This is the case where our RTT is above
3910 * the max target and we have been configured
3911 * to just do timely no bonus up stuff in that case.
3913 * There are two configurations, set to 1, and we
3914 * just do timely if we are over our max. If it's
3915 * set above 1 then we slam the multipliers down
3916 * to 100 and then decrement per timely.
3918 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
3920 if (rack->r_ctl.rc_no_push_at_mrtt > 1)
3921 rack_validate_multipliers_at_or_below_100(rack);
3922 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
3923 } else if ((last_bw_est < low_bnd) && !losses) {
3925 * We are decreasing. This is a bit complicated: it
3926 * means we are losing ground. This could be
3927 * because another flow entered and we are competing
3928 * for b/w with it. This will push the RTT up, which
3929 * makes timely unusable unless we want to get shoved
3930 * into a corner and just be backed off (the age-old
3931 * problem with delay-based CC).
3933 * On the other hand if it was a route change we
3934 * would like to stay somewhat contained and not
3935 * blow out the buffers.
3937 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
3939 rack->r_ctl.last_gp_comp_bw = cur_bw;
3940 if (rack->rc_gp_bwred == 0) {
3941 /* Go into reduction counting */
3942 rack->rc_gp_bwred = 1;
3943 rack->rc_gp_timely_dec_cnt = 0;
3945 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) ||
3946 (timely_says == 0)) {
3948 * Push another time with a faster pacing
3949 * to try to gain back (we include override to
3950 * get a full raise factor).
3952 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) ||
3953 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) ||
3954 (timely_says == 0) ||
3955 (rack_down_raise_thresh == 0)) {
3957 * Do an override up in b/w if we were
3958 * below the threshold or if the threshold
3959 * is zero we always do the raise.
3961 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1);
3963 /* Log it stays the same */
3964 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0,
3967 rack->rc_gp_timely_dec_cnt++;
3968 /* We are not incrementing really no-count */
3969 rack->rc_gp_incr = 0;
3970 rack->rc_gp_timely_inc_cnt = 0;
3973 * Let's just use the RTT
3974 * information and give up
3979 } else if ((timely_says != 2) &&
3981 (last_bw_est > up_bnd)) {
3983 * We are increasing b/w lets keep going, updating
3984 * our b/w and ignoring any timely input, unless
3985 * of course we are at our max raise (if there is one).
3988 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
3990 rack->r_ctl.last_gp_comp_bw = cur_bw;
3991 if (rack->rc_gp_saw_ss &&
3992 rack_per_upper_bound_ss &&
3993 (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) {
3995 * In cases where we can't go higher
3996 * we should just use timely.
4000 if (rack->rc_gp_saw_ca &&
4001 rack_per_upper_bound_ca &&
4002 (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) {
4004 * In cases where we can't go higher
4005 * we should just use timely.
4009 rack->rc_gp_bwred = 0;
4010 rack->rc_gp_timely_dec_cnt = 0;
4011 /* You get a set number of pushes if timely is trying to reduce */
4012 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) {
4013 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4015 /* Log it stays the same */
4016 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0,
4022 * We are staying between the lower and upper range bounds
4023 * so use timely to decide.
4025 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4029 rack->rc_gp_incr = 0;
4030 rack->rc_gp_timely_inc_cnt = 0;
4031 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) &&
4033 (last_bw_est < low_bnd)) {
4034 /* We are losing ground */
4035 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4036 rack->rc_gp_timely_dec_cnt++;
4037 /* We are not incrementing really no-count */
4038 rack->rc_gp_incr = 0;
4039 rack->rc_gp_timely_inc_cnt = 0;
4041 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
4043 rack->rc_gp_bwred = 0;
4044 rack->rc_gp_timely_dec_cnt = 0;
4045 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4051 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt)
4053 int32_t timely_says;
4054 uint64_t log_mult, log_rtt_a_diff;
4056 log_rtt_a_diff = rtt;
4057 log_rtt_a_diff <<= 32;
4058 log_rtt_a_diff |= (uint32_t)rtt_diff;
4059 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) *
4060 rack_gp_rtt_maxmul)) {
4061 /* Reduce the b/w multiplier */
4063 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
4065 log_mult |= prev_rtt;
4066 rack_log_timely(rack, timely_says, log_mult,
4067 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4068 log_rtt_a_diff, __LINE__, 4);
4069 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
4070 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
4071 max(rack_gp_rtt_mindiv , 1)))) {
4072 /* Increase the b/w multiplier */
4073 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
4074 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
4075 max(rack_gp_rtt_mindiv , 1));
4077 log_mult |= prev_rtt;
4079 rack_log_timely(rack, timely_says, log_mult ,
4080 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4081 log_rtt_a_diff, __LINE__, 5);
4084 * Use a gradient to find it; the timely gradient is:
4086 * grad = rc_rtt_diff / min_rtt;
4088 * anything below or equal to 0 will be
4089 * an increase indication. Anything above
4090 * zero is a decrease. Note we take care
4091 * of the actual gradient calculation
4092 * in the reduction (it's not needed for
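 *
 * Worked example (illustrative values): with min_rtt = 10ms and a
 * maxmul of 3, any gp srtt at or above 30ms forces a decrease;
 * with a minmul/mindiv of 1/4, any gp srtt at or below 12.5ms
 * forces an increase; in between, the sign of rc_rtt_diff decides.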
4095 log_mult = prev_rtt;
4096 if (rtt_diff <= 0) {
4098 * Rttdiff is less than zero, increase the
4099 * b/w multiplier (it's 0 or negative)
4102 rack_log_timely(rack, timely_says, log_mult,
4103 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6);
4105 /* Reduce the b/w multiplier */
4107 rack_log_timely(rack, timely_says, log_mult,
4108 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7);
4111 return (timely_says);
4115 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
4116 tcp_seq th_ack, int line, uint8_t quality)
4118 uint64_t tim, bytes_ps, ltim, stim, utim;
4119 uint32_t segsiz, bytes, reqbytes, us_cts;
4120 int32_t gput, new_rtt_diff, timely_says;
4121 uint64_t resid_bw, subpart = 0, addpart = 0, srtt;
4124 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
4125 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
4126 if (TSTMP_GEQ(us_cts, tp->gput_ts))
4127 tim = us_cts - tp->gput_ts;
4130 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts)
4131 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts;
4135 * Use the larger of the send time or ack time. This prevents us
4136 * from being influenced by ack artifacts to come up with too
4137 * high a measurement. Note that since we are spanning over many more
4138 * bytes in most of our measurements hopefully that is less likely to
4144 utim = max(stim, 1);
4145 /* Lets get a msec time ltim too for the old stuff */
4146 ltim = max(1, (utim / HPTS_USEC_IN_MSEC));
4147 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim;
4148 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz));
4149 if ((tim == 0) && (stim == 0)) {
4151 * Invalid measurement time, maybe
4152 * all on one ack/one send?
4156 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4157 0, 0, 0, 10, __LINE__, NULL, quality);
4158 goto skip_measurement;
4160 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) {
4161 /* We never made a us_rtt measurement? */
4164 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4165 0, 0, 0, 10, __LINE__, NULL, quality);
4166 goto skip_measurement;
4169 * Calculate the maximum possible b/w this connection
4170 * could have. We base our calculation on the lowest
4171 * rtt we have seen during the measurement and the
4172 * largest rwnd the client has given us in that time. This
4173 * forms a BDP that is the maximum that we could ever
4174 * get to the client. Anything larger is not valid.
4176 * I originally had code here that rejected measurements
4177 * where the time was less than 1/2 the latest us_rtt.
4178 * But after thinking on that I realized it's wrong since,
4179 * say, you had a 150Mbps or even 1Gbps link, and you
4180 * were a long way away.. example: I am in Europe (100ms rtt)
4181 * talking to my 1Gbps link in S.C. Now measuring say 150,000
4182 * bytes my time would be 1.2ms, and yet my rtt would say
4183 * the measurement was invalid since the time was < 50ms. The
4184 * same thing is true for 150Mb (8ms of time).
4186 * A better way I realized is to look at what the maximum
4187 * the connection could possibly do. This is gated on
4188 * the lowest RTT we have seen and the highest rwnd.
4189 * We should in theory never exceed that, if we are
4190 * then something on the path is storing up packets
4191 * and then feeding them all at once to our endpoint
4192 * messing up our measurement.
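 *
 * Worked example (illustrative values): if the largest rwnd seen
 * in the window was 1,000,000 bytes and the lowest rtt was
 * 10,000us, then last_max_bw = 1,000,000 * 1,000,000 / 10,000 =
 * 100,000,000 bytes/sec. Any goodput sample above that is clamped
 * down to it below.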
4194 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd;
4195 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC;
4196 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt;
4197 if (SEQ_LT(th_ack, tp->gput_seq)) {
4198 /* No measurement can be made */
4201 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4202 0, 0, 0, 10, __LINE__, NULL, quality);
4203 goto skip_measurement;
4205 bytes = (th_ack - tp->gput_seq);
4206 bytes_ps = (uint64_t)bytes;
4208 * Don't measure a b/w for pacing unless we have gotten at least
4209 * an initial windows worth of data in this measurement interval.
4211 * Small numbers of bytes get badly influenced by delayed ack and
4212 * other artifacts. Note we take the initial window or our
4213 * defined minimum GP (defaulting to 10 which hopefully is the
4216 if (rack->rc_gp_filled == 0) {
4218 * The initial estimate is special. We
4219 * have blasted out an IW worth of packets
4220 * without real valid ack ts results. We
4221 * then setup the app_limited_needs_set flag,
4222 * this should get the first ack in (probably 2
4223 * MSS worth) to be recorded as the timestamp.
4224 * We thus allow a smaller number of bytes i.e.
4227 reqbytes -= (2 * segsiz);
4228 /* Also lets fill previous for our first measurement to be neutral */
4229 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
4231 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) {
4232 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4233 rack->r_ctl.rc_app_limited_cnt,
4234 0, 0, 10, __LINE__, NULL, quality);
4235 goto skip_measurement;
4238 * We now need to calculate the Timely like status so
4239 * we can update (possibly) the b/w multipliers.
4241 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt;
4242 if (rack->rc_gp_filled == 0) {
4243 /* No previous reading */
4244 rack->r_ctl.rc_rtt_diff = new_rtt_diff;
4246 if (rack->measure_saw_probe_rtt == 0) {
4248 * We don't want a probertt to be counted
4249 * since it will be negative incorrectly. We
4250 * expect to be reducing the RTT when we
4251 * pace at a slower rate.
4253 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8);
4254 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8);
4257 timely_says = rack_make_timely_judgement(rack,
4258 rack->r_ctl.rc_gp_srtt,
4259 rack->r_ctl.rc_rtt_diff,
4260 rack->r_ctl.rc_prev_gp_srtt
4262 bytes_ps *= HPTS_USEC_IN_SEC;
4264 if (bytes_ps > rack->r_ctl.last_max_bw) {
4266 * Something is on path playing
4267 * since this b/w is not possible based
4268 * on our BDP (highest rwnd and lowest rtt
4269 * we saw in the measurement window).
4271 * Another option here would be to
4272 * instead skip the measurement.
4274 rack_log_pacing_delay_calc(rack, bytes, reqbytes,
4275 bytes_ps, rack->r_ctl.last_max_bw, 0,
4276 11, __LINE__, NULL, quality);
4277 bytes_ps = rack->r_ctl.last_max_bw;
4279 /* We store gp for b/w in bytes per second */
4280 if (rack->rc_gp_filled == 0) {
4281 /* Initial measurement */
4283 rack->r_ctl.gp_bw = bytes_ps;
4284 rack->rc_gp_filled = 1;
4285 rack->r_ctl.num_measurements = 1;
4286 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
4288 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4289 rack->r_ctl.rc_app_limited_cnt,
4290 0, 0, 10, __LINE__, NULL, quality);
4292 if (tcp_in_hpts(rack->rc_inp) &&
4293 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
4295 * Ok we can't trust the pacer in this case
4296 * where we transition from un-paced to paced.
4297 * Or for that matter when the burst mitigation
4298 * was making a wild guess and got it wrong.
4299 * Stop the pacer and clear up all the aggregate
4302 tcp_hpts_remove(rack->rc_inp);
4303 rack->r_ctl.rc_hpts_flags = 0;
4304 rack->r_ctl.rc_last_output_to = 0;
4307 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) {
4308 /* Still a small number run an average */
4309 rack->r_ctl.gp_bw += bytes_ps;
4310 addpart = rack->r_ctl.num_measurements;
4311 rack->r_ctl.num_measurements++;
4312 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
4313 /* We have collected enough to move forward */
4314 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements;
4319 * We want to take 1/wma of the goodput and add it in to 7/8th
4320 * of the old value weighted by the srtt. So if your measurement
4321 * period is say 2 SRTT's long you would get 1/4 as the
4322 * value, if it was like 1/2 SRTT then you would get 1/16th.
4324 * But we must be careful not to take too much i.e. if the
4325 * srtt is say 20ms and the measurement is taken over
4326 * 400ms our weight would be 400/20 i.e. 20. On the
4327 * other hand if we get a measurement over 1ms with a
4328 * 10ms rtt we only want to take a much smaller portion.
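 *
 * Worked example (illustrative values, 1/8 weighting): a sample
 * spanning utim = 5ms against a 40ms srtt contributes only
 * 5 / (40 * 8), i.e. about 1.6% of the WMA, while a sample
 * spanning more than one srtt contributes the full 1/8 share.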
4330 if (rack->r_ctl.num_measurements < 0xff) {
4331 rack->r_ctl.num_measurements++;
4333 srtt = (uint64_t)tp->t_srtt;
4336 * Strange why did t_srtt go back to zero?
4338 if (rack->r_ctl.rc_rack_min_rtt)
4339 srtt = rack->r_ctl.rc_rack_min_rtt;
4341 srtt = HPTS_USEC_IN_MSEC;
4344 * XXXrrs: Note for reviewers, in playing with
4345 * dynamic pacing I discovered this GP calculation
4346 * as done originally leads to some undesired results.
4347 * Basically you can get longer measurements contributing
4348 * too much to the WMA. Thus I changed it if you are doing
4349 * dynamic adjustments to only do the apportioned adjustment
4350 * if we have a very small (time wise) measurement. Longer
4351 * measurements just get their weight (defaulting to 1/8)
4352 * added to the WMA. We may want to think about changing
4353 * this to always do that for both sides i.e. dynamic
4354 * and non-dynamic... but considering lots of folks
4355 * were playing with this I did not want to change the
4356 * calculation per se without your thoughts.. Lawrence?
4359 if (rack->rc_gp_dyn_mul == 0) {
4360 subpart = rack->r_ctl.gp_bw * utim;
4361 subpart /= (srtt * 8);
4362 if (subpart < (rack->r_ctl.gp_bw / 2)) {
4364 * The b/w update takes no more
4365 * away than 1/2 our running total
4368 addpart = bytes_ps * utim;
4369 addpart /= (srtt * 8);
4372 * Don't allow a single measurement
4373 * to account for more than 1/2 of the
4374 * WMA. This could happen on a retransmission
4375 * where utim becomes huge compared to
4376 * srtt (multiple retransmissions when using
4377 * the sending rate which factors in all the
4378 * transmissions from the first one).
4380 subpart = rack->r_ctl.gp_bw / 2;
4381 addpart = bytes_ps / 2;
4383 resid_bw = rack->r_ctl.gp_bw - subpart;
4384 rack->r_ctl.gp_bw = resid_bw + addpart;
4387 if ((utim / srtt) <= 1) {
4389 * The b/w update was over a small period
4390 * of time. The idea here is to prevent a small
4391 * measurement time period from counting
4392 * too much. So we scale it based on the
4393 * time so it attributes less than 1/rack_wma_divisor
4394 * of its measurement.
4396 subpart = rack->r_ctl.gp_bw * utim;
4397 subpart /= (srtt * rack_wma_divisor);
4398 addpart = bytes_ps * utim;
4399 addpart /= (srtt * rack_wma_divisor);
4402 * The scaled measurement was long
4403 * enough so lets just add in the
4404 * portion of the measurement i.e. 1/rack_wma_divisor
4406 subpart = rack->r_ctl.gp_bw / rack_wma_divisor;
4407 addpart = bytes_ps / rack_wma_divisor;
4409 if ((rack->measure_saw_probe_rtt == 0) ||
4410 (bytes_ps > rack->r_ctl.gp_bw)) {
4412 * For probe-rtt we only add it in
4413 * if its larger, all others we just
4417 resid_bw = rack->r_ctl.gp_bw - subpart;
4418 rack->r_ctl.gp_bw = resid_bw + addpart;
4422 if ((rack->gp_ready == 0) &&
4423 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
4424 /* We have enough measurements now */
4426 rack_set_cc_pacing(rack);
4427 if (rack->defer_options)
4428 rack_apply_deferred_options(rack);
4430 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim,
4431 rack_get_bw(rack), 22, did_add, NULL, quality);
4432 /* We do not update any multipliers if we are in or have seen a probe-rtt */
4433 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set)
4434 rack_update_multiplier(rack, timely_says, bytes_ps,
4435 rack->r_ctl.rc_gp_srtt,
4436 rack->r_ctl.rc_rtt_diff);
4437 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim,
4438 rack_get_bw(rack), 3, line, NULL, quality);
4439 /* reset the gp srtt and setup the new prev */
4440 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
4441 /* Record the lost count for the next measurement */
4442 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count;
4444 * We restart our diffs based on the gpsrtt in the
4445 * measurement window.
4447 rack->rc_gp_rtt_set = 0;
4448 rack->rc_gp_saw_rec = 0;
4449 rack->rc_gp_saw_ca = 0;
4450 rack->rc_gp_saw_ss = 0;
4451 rack->rc_dragged_bottom = 0;
4455 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
4458 * XXXLAS: This is a temporary hack, and should be
4459 * chained off VOI_TCP_GPUT when stats(9) grows an
4460 * API to deal with chained VOIs.
4462 if (tp->t_stats_gput_prev > 0)
4463 stats_voi_update_abs_s32(tp->t_stats,
4465 ((gput - tp->t_stats_gput_prev) * 100) /
4466 tp->t_stats_gput_prev);
4468 tp->t_flags &= ~TF_GPUTINPROG;
4469 tp->t_stats_gput_prev = gput;
4471 * Now, are we app limited and is there space from where we
4472 * were to where we want to go?
4474 * We don't do the other case i.e. non-applimited here since
4475 * the next send will trigger us picking up the missing data.
4477 if (rack->r_ctl.rc_first_appl &&
4478 TCPS_HAVEESTABLISHED(tp->t_state) &&
4479 rack->r_ctl.rc_app_limited_cnt &&
4480 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) &&
4481 ((rack->r_ctl.rc_first_appl->r_end - th_ack) >
4482 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
4484 * Yep there is enough outstanding to make a measurement here.
4486 struct rack_sendmap *rsm, fe;
4488 rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
4489 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
4490 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
4491 rack->app_limited_needs_set = 0;
4492 tp->gput_seq = th_ack;
4493 if (rack->in_probe_rtt)
4494 rack->measure_saw_probe_rtt = 1;
4495 else if ((rack->measure_saw_probe_rtt) &&
4496 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
4497 rack->measure_saw_probe_rtt = 0;
4498 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) {
4499 /* There is a full window to gain info from */
4500 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
4502 /* We can only measure up to the applimited point */
4503 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack);
4504 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
4506 * We don't have enough to make a measurement.
4508 tp->t_flags &= ~TF_GPUTINPROG;
4509 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq,
4510 0, 0, 0, 6, __LINE__, NULL, quality);
4514 if (tp->t_state >= TCPS_FIN_WAIT_1) {
4516 * We will get no more data into the SB;
4517 * this means we need to have the data available
4518 * before we start a measurement.
4520 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < (tp->gput_ack - tp->gput_seq)) {
4521 /* Nope not enough data. */
4525 tp->t_flags |= TF_GPUTINPROG;
4527 * Now we need to find the timestamp of the send at tp->gput_seq
4528 * for the send based measurement.
4530 fe.r_start = tp->gput_seq;
4531 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
4533 /* Ok send-based limit is set */
4534 if (SEQ_LT(rsm->r_start, tp->gput_seq)) {
4536 * Move back to include the earlier part
4537 * so our ack time lines up right (this may
4538 * make an overlapping measurement but that's
4541 tp->gput_seq = rsm->r_start;
4543 if (rsm->r_flags & RACK_ACKED)
4544 tp->gput_ts = (uint32_t)rsm->r_ack_arrival;
4546 rack->app_limited_needs_set = 1;
4547 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
4550 * If we don't find the rsm due to some
4551 * send-limit, set the current time, which
4552 * basically disables the send-limit.
4557 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
4559 rack_log_pacing_delay_calc(rack,
4564 rack->r_ctl.rc_app_limited_cnt,
4566 __LINE__, NULL, quality);
4571 * CC wrapper hook functions
4574 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs,
4575 uint16_t type, int32_t recovery)
4577 uint32_t prior_cwnd, acked;
4578 struct tcp_log_buffer *lgb = NULL;
4579 uint8_t labc_to_use, quality;
4581 INP_WLOCK_ASSERT(tp->t_inpcb);
4582 tp->ccv->nsegs = nsegs;
4583 acked = tp->ccv->bytes_this_ack = (th_ack - tp->snd_una);
4584 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) {
4587 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp);
4588 if (tp->ccv->bytes_this_ack > max) {
4589 tp->ccv->bytes_this_ack = max;
4593 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
4594 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd);
4596 quality = RACK_QUALITY_NONE;
4597 if ((tp->t_flags & TF_GPUTINPROG) &&
4598 rack_enough_for_measurement(tp, rack, th_ack, &quality)) {
4599 /* Measure the Goodput */
4600 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality);
4601 #ifdef NETFLIX_PEAKRATE
4602 if ((type == CC_ACK) &&
4603 (tp->t_maxpeakrate)) {
4605 * We update t_peakrate_thr. This gives us roughly
4606 * one update per round trip time. Note
4607 * it will only be used if pace_always is off, i.e.
4608 * we don't do this for paced flows.
4610 rack_update_peakrate_thr(tp);
4614 /* Which way are we limited? If not cwnd limited, no advance in CA */
4615 if (tp->snd_cwnd <= tp->snd_wnd)
4616 tp->ccv->flags |= CCF_CWND_LIMITED;
4618 tp->ccv->flags &= ~CCF_CWND_LIMITED;
4619 if (tp->snd_cwnd > tp->snd_ssthresh) {
4620 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
4621 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp));
4622 /* For the setting of a window past use the actual scwnd we are using */
4623 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) {
4624 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use;
4625 tp->ccv->flags |= CCF_ABC_SENTAWND;
4628 tp->ccv->flags &= ~CCF_ABC_SENTAWND;
4629 tp->t_bytes_acked = 0;
4631 prior_cwnd = tp->snd_cwnd;
4632 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec ||
4633 (rack_client_low_buf && (rack->client_bufferlvl < rack_client_low_buf)))
4634 labc_to_use = rack->rc_labc;
4636 labc_to_use = rack_max_abc_post_recovery;
4637 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
4638 union tcp_log_stackspecific log;
4641 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
4642 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
4643 log.u_bbr.flex1 = th_ack;
4644 log.u_bbr.flex2 = tp->ccv->flags;
4645 log.u_bbr.flex3 = tp->ccv->bytes_this_ack;
4646 log.u_bbr.flex4 = tp->ccv->nsegs;
4647 log.u_bbr.flex5 = labc_to_use;
4648 log.u_bbr.flex6 = prior_cwnd;
4649 log.u_bbr.flex7 = V_tcp_do_newsack;
4650 log.u_bbr.flex8 = 1;
4651 lgb = tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
4652 0, &log, false, NULL, NULL, 0, &tv);
4654 if (CC_ALGO(tp)->ack_received != NULL) {
4655 /* XXXLAS: Find a way to live without this */
4656 tp->ccv->curack = th_ack;
4657 tp->ccv->labc = labc_to_use;
4658 tp->ccv->flags |= CCF_USE_LOCAL_ABC;
4659 CC_ALGO(tp)->ack_received(tp->ccv, type);
4662 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd;
4664 if (rack->r_must_retran) {
4665 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) {
4667 * We now are beyond the rxt point so let's disable
4670 rack->r_ctl.rc_out_at_rto = 0;
4671 rack->r_must_retran = 0;
4672 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) {
4674 * Only decrement the rc_out_at_rto if the cwnd advances
4675 * at least a whole segment. Otherwise next time the peer
4676 * acks, we won't be able to send. This generally happens
4677 * when we are in Congestion Avoidance.
4679 if (acked <= rack->r_ctl.rc_out_at_rto){
4680 rack->r_ctl.rc_out_at_rto -= acked;
4682 rack->r_ctl.rc_out_at_rto = 0;
4687 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use);
4689 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) {
4690 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use;
4692 #ifdef NETFLIX_PEAKRATE
4693 /* we enforce max peak rate if it is set and we are not pacing */
4694 if ((rack->rc_always_pace == 0) &&
4695 tp->t_peakrate_thr &&
4696 (tp->snd_cwnd > tp->t_peakrate_thr)) {
4697 tp->snd_cwnd = tp->t_peakrate_thr;
4703 tcp_rack_partialack(struct tcpcb *tp)
4705 struct tcp_rack *rack;
4707 rack = (struct tcp_rack *)tp->t_fb_ptr;
4708 INP_WLOCK_ASSERT(tp->t_inpcb);
4710 * If we are doing PRR and have enough
4711 * room to send <or> we are pacing and prr
4712 * is disabled we will want to see if we
4713 * can send data (by setting r_wanted_output to
4716 if ((rack->r_ctl.rc_prr_sndcnt > 0) ||
4718 rack->r_wanted_output = 1;
4722 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack)
4724 struct tcp_rack *rack;
4727 orig_cwnd = tp->snd_cwnd;
4728 INP_WLOCK_ASSERT(tp->t_inpcb);
4729 rack = (struct tcp_rack *)tp->t_fb_ptr;
4730 /* only alert CC if we alerted when we entered */
4731 if (CC_ALGO(tp)->post_recovery != NULL) {
4732 tp->ccv->curack = th_ack;
4733 CC_ALGO(tp)->post_recovery(tp->ccv);
4734 if (tp->snd_cwnd < tp->snd_ssthresh) {
4736 * Rack has burst control and pacing
4737 * so let's not set this any lower than
4738 * snd_ssthresh per RFC-6582 (option 2).
4740 tp->snd_cwnd = tp->snd_ssthresh;
4743 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
4744 union tcp_log_stackspecific log;
4747 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
4748 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
4749 log.u_bbr.flex1 = th_ack;
4750 log.u_bbr.flex2 = tp->ccv->flags;
4751 log.u_bbr.flex3 = tp->ccv->bytes_this_ack;
4752 log.u_bbr.flex4 = tp->ccv->nsegs;
4753 log.u_bbr.flex5 = V_tcp_abc_l_var;
4754 log.u_bbr.flex6 = orig_cwnd;
4755 log.u_bbr.flex7 = V_tcp_do_newsack;
4756 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
4757 log.u_bbr.flex8 = 2;
4758 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
4759 0, &log, false, NULL, NULL, 0, &tv);
4761 if ((rack->rack_no_prr == 0) &&
4762 (rack->no_prr_addback == 0) &&
4763 (rack->r_ctl.rc_prr_sndcnt > 0)) {
4765 * Suck the next prr cnt back into cwnd, but
4766 * only do that if we are not application limited.
4768 if (ctf_outstanding(tp) <= sbavail(&(tp->t_inpcb->inp_socket->so_snd))) {
4770 * We are allowed to add back to the cwnd the amount we did
4772 * a) no_prr_addback is off.
4773 * b) we are not app limited
4774 * c) we are doing prr
4776 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none).
4778 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax),
4779 rack->r_ctl.rc_prr_sndcnt);
4781 rack->r_ctl.rc_prr_sndcnt = 0;
4782 rack_log_to_prr(rack, 1, 0, __LINE__);
4784 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__);
4785 tp->snd_recover = tp->snd_una;
4786 if (rack->r_ctl.dsack_persist) {
4787 rack->r_ctl.dsack_persist--;
4788 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
4789 rack->r_ctl.num_dsack = 0;
4791 rack_log_dsack_event(rack, 1, __LINE__, 0, 0);
4793 EXIT_RECOVERY(tp->t_flags);
4797 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line)
4799 struct tcp_rack *rack;
4800 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd;
4802 INP_WLOCK_ASSERT(tp->t_inpcb);
4804 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
4806 if (IN_RECOVERY(tp->t_flags) == 0) {
4807 in_rec_at_entry = 0;
4808 ssthresh_enter = tp->snd_ssthresh;
4809 cwnd_enter = tp->snd_cwnd;
4811 in_rec_at_entry = 1;
4812 rack = (struct tcp_rack *)tp->t_fb_ptr;
4815 tp->t_flags &= ~TF_WASFRECOVERY;
4816 tp->t_flags &= ~TF_WASCRECOVERY;
4817 if (!IN_FASTRECOVERY(tp->t_flags)) {
4818 rack->r_ctl.rc_prr_delivered = 0;
4819 rack->r_ctl.rc_prr_out = 0;
4820 if (rack->rack_no_prr == 0) {
4821 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
4822 rack_log_to_prr(rack, 2, in_rec_at_entry, line);
4824 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una;
4825 tp->snd_recover = tp->snd_max;
4826 if (tp->t_flags2 & TF2_ECN_PERMIT)
4827 tp->t_flags2 |= TF2_ECN_SND_CWR;
4831 if (!IN_CONGRECOVERY(tp->t_flags) ||
4833 * Allow ECN reaction on ACK to CWR, if
4834 * that data segment was also CE marked.
4836 SEQ_GEQ(ack, tp->snd_recover)) {
4837 EXIT_CONGRECOVERY(tp->t_flags);
4838 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd);
4839 tp->snd_recover = tp->snd_max + 1;
4840 if (tp->t_flags2 & TF2_ECN_PERMIT)
4841 tp->t_flags2 |= TF2_ECN_SND_CWR;
4846 tp->t_bytes_acked = 0;
4847 EXIT_RECOVERY(tp->t_flags);
4848 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 /
4849 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp);
4850 orig_cwnd = tp->snd_cwnd;
4851 tp->snd_cwnd = ctf_fixed_maxseg(tp);
4852 rack_log_to_prr(rack, 16, orig_cwnd, line);
4853 if (tp->t_flags2 & TF2_ECN_PERMIT)
4854 tp->t_flags2 |= TF2_ECN_SND_CWR;
4857 KMOD_TCPSTAT_INC(tcps_sndrexmitbad);
4858 /* RTO was unnecessary, so reset everything. */
4859 tp->snd_cwnd = tp->snd_cwnd_prev;
4860 tp->snd_ssthresh = tp->snd_ssthresh_prev;
4861 tp->snd_recover = tp->snd_recover_prev;
4862 if (tp->t_flags & TF_WASFRECOVERY) {
4863 ENTER_FASTRECOVERY(tp->t_flags);
4864 tp->t_flags &= ~TF_WASFRECOVERY;
4866 if (tp->t_flags & TF_WASCRECOVERY) {
4867 ENTER_CONGRECOVERY(tp->t_flags);
4868 tp->t_flags &= ~TF_WASCRECOVERY;
4870 tp->snd_nxt = tp->snd_max;
4871 tp->t_badrxtwin = 0;
4874 if ((CC_ALGO(tp)->cong_signal != NULL) &&
4876 tp->ccv->curack = ack;
4877 CC_ALGO(tp)->cong_signal(tp->ccv, type);
4879 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) {
4880 rack_log_to_prr(rack, 15, cwnd_enter, line);
4881 rack->r_ctl.dsack_byte_cnt = 0;
4882 rack->r_ctl.retran_during_recovery = 0;
4883 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter;
4884 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter;
4885 rack->r_ent_rec_ns = 1;
4890 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp)
4894 INP_WLOCK_ASSERT(tp->t_inpcb);
4896 #ifdef NETFLIX_STATS
4897 KMOD_TCPSTAT_INC(tcps_idle_restarts);
4898 if (tp->t_state == TCPS_ESTABLISHED)
4899 KMOD_TCPSTAT_INC(tcps_idle_estrestarts);
4901 if (CC_ALGO(tp)->after_idle != NULL)
4902 CC_ALGO(tp)->after_idle(tp->ccv);
4904 if (tp->snd_cwnd == 1)
4905 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */
4907 i_cwnd = rc_init_window(rack);
4910 * Being idle is no different than the initial window. If the cc
4911 * clamps it down below the initial window, raise it to the initial
4914 if (tp->snd_cwnd < i_cwnd) {
4915 tp->snd_cwnd = i_cwnd;
4920 * Indicate whether this ack should be delayed. We can delay the ack if
4921 * following conditions are met:
4922 * - There is no delayed ack timer in progress.
4923 * - Our last ack wasn't a 0-sized window. We never want to delay
4924 * the ack that opens up a 0-sized window.
4925 * - LRO wasn't used for this segment. We make sure by checking that the
4926 * segment size is not larger than the MSS.
4927 * - Delayed acks are enabled or this is a half-synchronized T/TCP
4930 #define DELAY_ACK(tp, tlen) \
4931 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \
4932 ((tp->t_flags & TF_DELACK) == 0) && \
4933 (tlen <= tp->t_maxseg) && \
4934 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
4936 static struct rack_sendmap *
4937 rack_find_lowest_rsm(struct tcp_rack *rack)
4939 struct rack_sendmap *rsm;
4942 * Walk the time-order transmitted list looking for an rsm that is
4943 * not acked. This will be the one that was sent the longest time
4944 * ago that is still outstanding.
4946 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
4947 if (rsm->r_flags & RACK_ACKED) {
4956 static struct rack_sendmap *
4957 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
4959 struct rack_sendmap *prsm;
4962 * Walk the sequence order list backward until we arrive at
4963 * the highest seq not acked. In theory when this is called it
4964 * should be the last segment (which it was not).
4967 RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) {
4968 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
4977 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
4983 * lro is the flag we use to determine if we have seen reordering.
4984 * If it gets set we have seen reordering. The reorder logic either
4985 * works in one of two ways:
4987 * If reorder-fade is configured, then we track the last time we saw
4988 * re-ordering occur. If we reach the point where enough time has
4989 * passed we no longer consider reordering as occurring.
4991 * Or if reorder-fade is 0, then once we see reordering we consider
4992 * the connection to always be subject to reordering and just set lro
4995 * In the end if lro is non-zero we add the extra time for
5000 if (rack->r_ctl.rc_reorder_ts) {
5001 if (rack->r_ctl.rc_reorder_fade) {
5002 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
5003 lro = cts - rack->r_ctl.rc_reorder_ts;
5006	 * No time has passed since the last
5007	 * reorder, mark it as reordering.
5012 /* Negative time? */
5015 if (lro > rack->r_ctl.rc_reorder_fade) {
5016 /* Turn off reordering seen too */
5017 rack->r_ctl.rc_reorder_ts = 0;
5021	 /* Reordering does not fade */
5027 if (rack->rc_rack_tmr_std_based == 0) {
5028 thresh = srtt + rack->r_ctl.rc_pkt_delay;
5030 /* Standards based pkt-delay is 1/4 srtt */
5031 thresh = srtt + (srtt >> 2);
5033 if (lro && (rack->rc_rack_tmr_std_based == 0)) {
5034 /* It must be set, if not you get 1/4 rtt */
5035 if (rack->r_ctl.rc_reorder_shift)
5036 thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
5038 thresh += (srtt >> 2);
5040 if (rack->rc_rack_use_dsack &&
5042 (rack->r_ctl.num_dsack > 0)) {
5044 * We only increase the reordering window if we
5045 * have seen reordering <and> we have a DSACK count.
5047 thresh += rack->r_ctl.num_dsack * (srtt >> 2);
5048 rack_log_dsack_event(rack, 4, __LINE__, srtt, thresh);
5050 /* SRTT * 2 is the ceiling */
5051 if (thresh > (srtt * 2)) {
5054 /* And we don't want it above the RTO max either */
5055 if (thresh > rack_rto_max) {
5056 thresh = rack_rto_max;
5058 rack_log_dsack_event(rack, 6, __LINE__, srtt, thresh);
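/*
 * A compressed, illustrative sketch of the non-standards-based path
 * above (ignoring the DSACK extension); all names and the helper are
 * illustrative only and not part of the stack.  Values are usecs.
 */
#if 0	/* example only, not compiled */
static uint32_t
example_rack_thresh(uint32_t srtt, uint32_t pkt_delay, int reordering_seen,
    uint32_t reorder_shift, uint32_t rto_max)
{
	uint32_t thresh;

	thresh = srtt + pkt_delay;	/* non-standards-based form */
	if (reordering_seen)		/* extra head room for re-ordering */
		thresh += (reorder_shift ? (srtt >> reorder_shift) : (srtt >> 2));
	if (thresh > (srtt * 2))	/* SRTT * 2 is the ceiling */
		thresh = srtt * 2;
	if (thresh > rto_max)		/* and never above the RTO max */
		thresh = rto_max;
	return (thresh);
}
#endif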
5063 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
5064 struct rack_sendmap *rsm, uint32_t srtt)
5066 struct rack_sendmap *prsm;
5067 uint32_t thresh, len;
5072 if (rack->r_ctl.rc_tlp_threshold)
5073 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold);
5075 thresh = (srtt * 2);
5077	 /* Get the previously sent packet, if any */
5078 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
5079 len = rsm->r_end - rsm->r_start;
5080 if (rack->rack_tlp_threshold_use == TLP_USE_ID) {
5081 /* Exactly like the ID */
5082 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) {
5083 uint32_t alt_thresh;
5085 * Compensate for delayed-ack with the d-ack time.
5087 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
5088 if (alt_thresh > thresh)
5089 thresh = alt_thresh;
5091 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) {
5093 prsm = TAILQ_PREV(rsm, rack_head, r_tnext);
5094 if (prsm && (len <= segsiz)) {
5096 * Two packets outstanding, thresh should be (2*srtt) +
5097 * possible inter-packet delay (if any).
5099 uint32_t inter_gap = 0;
5102 idx = rsm->r_rtr_cnt - 1;
5103 nidx = prsm->r_rtr_cnt - 1;
5104 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) {
5105 /* Yes it was sent later (or at the same time) */
5106 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx];
5108 thresh += inter_gap;
5109 } else if (len <= segsiz) {
5111 * Possibly compensate for delayed-ack.
5113 uint32_t alt_thresh;
5115 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
5116 if (alt_thresh > thresh)
5117 thresh = alt_thresh;
5119 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) {
5121 if (len <= segsiz) {
5122 uint32_t alt_thresh;
5124 * Compensate for delayed-ack with the d-ack time.
5126 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
5127 if (alt_thresh > thresh)
5128 thresh = alt_thresh;
5131 /* Not above an RTO */
5132 if (thresh > tp->t_rxtcur) {
5133 thresh = tp->t_rxtcur;
5135 /* Not above a RTO max */
5136 if (thresh > rack_rto_max) {
5137 thresh = rack_rto_max;
5139 /* Apply user supplied min TLP */
5140 if (thresh < rack_tlp_min) {
5141 thresh = rack_tlp_min;
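/*
 * A worked example of the TLP threshold above, with assumed values:
 * srtt = 40000 usecs, rc_tlp_threshold = 2 (base = srtt + srtt/2 =
 * 60000) and rack_delayed_ack_time = 40000 usecs.  With at most one
 * segment outstanding the delayed-ack compensation gives
 *     alt_thresh = 40000 + 20000 + 40000 = 100000 usecs
 * which wins over the base, and is then capped by t_rxtcur and
 * rack_rto_max and raised to rack_tlp_min if needed.
 */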
5147 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack)
5150	 * We want the rack_rtt, which is the
5151	 * last rtt we measured. However, if that
5152	 * does not exist we fall back to the srtt (which
5153	 * we probably will never do) and then as a last
5154	 * resort we use RACK_INITIAL_RTO if no srtt is
5157 if (rack->rc_rack_rtt)
5158 return (rack->rc_rack_rtt);
5159 else if (tp->t_srtt == 0)
5160 return (RACK_INITIAL_RTO);
5161 return (tp->t_srtt);
5164 static struct rack_sendmap *
5165 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused)
5168	 * Check to see whether we need to fall into recovery. We will
5169	 * need to do so if our oldest transmit is past the time we should
5172 struct tcp_rack *rack;
5173 struct rack_sendmap *rsm;
5175 uint32_t srtt, thresh;
5177 rack = (struct tcp_rack *)tp->t_fb_ptr;
5178 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) {
5181 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5186 if (rsm->r_flags & RACK_ACKED) {
5187 rsm = rack_find_lowest_rsm(rack);
5191 idx = rsm->r_rtr_cnt - 1;
5192 srtt = rack_grab_rtt(tp, rack);
5193 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
5194 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) {
5197 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) {
5200	 /* Ok, if we reach here we are overdue and this segment can be sent */
5201 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
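/*
 * In other words, the oldest unacked rsm is considered overdue (and
 * recovery is entered above) once
 *     tsused - r_tim_lastsent[last rtr] >= thresh
 * where thresh comes from rack_calc_thresh_rack() above.
 */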
5206 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack)
5212 t = (tp->t_srtt + (tp->t_rttvar << 2));
5213 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
5214 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop);
5215 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT;
5216 ret_val = (uint32_t)tt;
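/*
 * A worked example with assumed values: t_srtt = 50000 usecs,
 * t_rttvar = 10000 usecs, t_rxtshift = 2.  Then
 *     t  = 50000 + (10000 << 2) = 90000
 *     tt = t * tcp_backoff[2]   = 90000 * 4 = 360000 usecs
 * which RACK_TCPT_RANGESET() then clamps between rack_persist_min and
 * rack_persist_max (plus the per-connection timer slop).
 */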
5221 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack)
5224	 * Start the FR timer, we do this based on getting the first one in
5225	 * the rc_tmap. Note that if it's NULL we must stop the timer. In all
5226	 * events we need to stop the running timer (if it's running) before
5227	 * starting the new one.
5229 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse;
5232 int32_t is_tlp_timer = 0;
5233 struct rack_sendmap *rsm;
5235 if (rack->t_timers_stopped) {
5236 /* All timers have been stopped none are to run */
5239 if (rack->rc_in_persist) {
5240 /* We can't start any timer in persists */
5241 return (rack_get_persists_timer_val(tp, rack));
5243 rack->rc_on_min_to = 0;
5244 if ((tp->t_state < TCPS_ESTABLISHED) ||
5245 ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
5248 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5249 if ((rsm == NULL) || sup_rack) {
5250 /* Nothing on the send map or no rack */
5252 time_since_sent = 0;
5253 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5256 * Should we discount the RTX timer any?
5258 * We want to discount it the smallest amount.
5259 * If a timer (Rack/TLP or RXT) has gone off more
5260	 * recently, that's the discount we want to use (now - timer time).
5261	 * If the retransmit of the oldest packet was more recent, then
5262 * we want to use that (now - oldest-packet-last_transmit_time).
5265 idx = rsm->r_rtr_cnt - 1;
5266 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx])))
5267 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
5269 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
5270 if (TSTMP_GT(cts, tstmp_touse))
5271 time_since_sent = cts - tstmp_touse;
5273 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) {
5274 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT;
5276 if (to > time_since_sent)
5277 to -= time_since_sent;
5279 to = rack->r_ctl.rc_min_to;
5282 /* Special case for KEEPINIT */
5283 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
5284 (TP_KEEPINIT(tp) != 0) &&
5287 * We have to put a ceiling on the rxt timer
5288 * of the keep-init timeout.
5290 uint32_t max_time, red;
5292 max_time = TICKS_2_USEC(TP_KEEPINIT(tp));
5293 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) {
5294 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]);
5300 /* Reduce timeout to the keep value if needed */
5308 if (rsm->r_flags & RACK_ACKED) {
5309 rsm = rack_find_lowest_rsm(rack);
5315 if (rack->sack_attack_disable) {
5317 * We don't want to do
5318 * any TLP's if you are an attacker.
5319 * Though if you are doing what
5320 * is expected you may still have
5321 * SACK-PASSED marks.
5325 /* Convert from ms to usecs */
5326 if ((rsm->r_flags & RACK_SACK_PASSED) ||
5327 (rsm->r_flags & RACK_RWND_COLLAPSED) ||
5328 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
5329 if ((tp->t_flags & TF_SENTFIN) &&
5330 ((tp->snd_max - tp->snd_una) == 1) &&
5331 (rsm->r_flags & RACK_HAS_FIN)) {
5333 * We don't start a rack timer if all we have is a
5338 if ((rack->use_rack_rr == 0) &&
5339 (IN_FASTRECOVERY(tp->t_flags)) &&
5340 (rack->rack_no_prr == 0) &&
5341 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) {
5343	 * We are not cheating, we are in recovery and
5344	 * do not yet have enough acks to get our next
5345 * retransmission out.
5347 * Note that classified attackers do not
5348 * get to use the rack-cheat.
5352 srtt = rack_grab_rtt(tp, rack);
5353 thresh = rack_calc_thresh_rack(rack, srtt, cts);
5354 idx = rsm->r_rtr_cnt - 1;
5355 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh;
5356 if (SEQ_GEQ(exp, cts)) {
5358 if (to < rack->r_ctl.rc_min_to) {
5359 to = rack->r_ctl.rc_min_to;
5360 if (rack->r_rr_config == 3)
5361 rack->rc_on_min_to = 1;
5364 to = rack->r_ctl.rc_min_to;
5365 if (rack->r_rr_config == 3)
5366 rack->rc_on_min_to = 1;
5369 /* Ok we need to do a TLP not RACK */
5371 if ((rack->rc_tlp_in_progress != 0) &&
5372 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) {
5374 * The previous send was a TLP and we have sent
5375 * N TLP's without sending new data.
5379 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
5381 /* We found no rsm to TLP with. */
5384 if (rsm->r_flags & RACK_HAS_FIN) {
5385	 /* If it's a FIN we don't do TLP */
5389 idx = rsm->r_rtr_cnt - 1;
5390 time_since_sent = 0;
5391 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time))
5392 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
5394 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
5395 if (TSTMP_GT(cts, tstmp_touse))
5396 time_since_sent = cts - tstmp_touse;
5399 if ((rack->rc_srtt_measure_made == 0) &&
5400 (tp->t_srtt == 1)) {
5402	 * If another stack has run and set srtt to 1,
5403	 * then the srtt was 0, so let's use the initial.
5405 srtt = RACK_INITIAL_RTO;
5407 srtt_cur = tp->t_srtt;
5411 srtt = RACK_INITIAL_RTO;
5413 * If the SRTT is not keeping up and the
5414 * rack RTT has spiked we want to use
5415 * the last RTT not the smoothed one.
5417 if (rack_tlp_use_greater &&
5419 (srtt < rack_grab_rtt(tp, rack))) {
5420 srtt = rack_grab_rtt(tp, rack);
5422 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt);
5423 if (thresh > time_since_sent) {
5424 to = thresh - time_since_sent;
5426 to = rack->r_ctl.rc_min_to;
5427 rack_log_alt_to_to_cancel(rack,
5429 time_since_sent, /* flex2 */
5430 tstmp_touse, /* flex3 */
5431 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */
5432 (uint32_t)rsm->r_tim_lastsent[idx],
5436 if (to < rack_tlp_min) {
5439 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) {
5441	 * If the TLP time works out to be larger than the max
5442	 * RTO, let's not do TLP, just RTO.
5447 if (is_tlp_timer == 0) {
5448 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK;
5450 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP;
5458 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5460 if (rack->rc_in_persist == 0) {
5461 if (tp->t_flags & TF_GPUTINPROG) {
5463	 * Stop the goodput now, the call to the
5464	 * measurement function clears the flag.
5466 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__,
5467 RACK_QUALITY_PERSIST);
5469 #ifdef NETFLIX_SHARED_CWND
5470 if (rack->r_ctl.rc_scw) {
5471 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
5472 rack->rack_scwnd_is_idle = 1;
5475 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
5476 if (rack->r_ctl.rc_went_idle_time == 0)
5477 rack->r_ctl.rc_went_idle_time = 1;
5478 rack_timer_cancel(tp, rack, cts, __LINE__);
5479 rack->r_ctl.persist_lost_ends = 0;
5480 rack->probe_not_answered = 0;
5481 rack->forced_ack = 0;
5483 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
5484 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
5485 rack->rc_in_persist = 1;
5490 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5492 if (tcp_in_hpts(rack->rc_inp)) {
5493 tcp_hpts_remove(rack->rc_inp);
5494 rack->r_ctl.rc_hpts_flags = 0;
5496 #ifdef NETFLIX_SHARED_CWND
5497 if (rack->r_ctl.rc_scw) {
5498 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
5499 rack->rack_scwnd_is_idle = 0;
5502 if (rack->rc_gp_dyn_mul &&
5503 (rack->use_fixed_rate == 0) &&
5504 (rack->rc_always_pace)) {
5506 * Do we count this as if a probe-rtt just
5509 uint32_t time_idle, idle_min;
5511 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time;
5512 idle_min = rack_min_probertt_hold;
5513 if (rack_probertt_gpsrtt_cnt_div) {
5515 extra = (uint64_t)rack->r_ctl.rc_gp_srtt *
5516 (uint64_t)rack_probertt_gpsrtt_cnt_mul;
5517 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div;
5518 idle_min += (uint32_t)extra;
5520 if (time_idle >= idle_min) {
5521 /* Yes, we count it as a probe-rtt. */
5524 us_cts = tcp_get_usecs(NULL);
5525 if (rack->in_probe_rtt == 0) {
5526 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
5527 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
5528 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
5529 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
5531 rack_exit_probertt(rack, us_cts);
5535 rack->rc_in_persist = 0;
5536 rack->r_ctl.rc_went_idle_time = 0;
5538 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
5539 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
5540 rack->r_ctl.rc_agg_delayed = 0;
5543 rack->r_ctl.rc_agg_early = 0;
5547 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts,
5548 struct hpts_diag *diag, struct timeval *tv)
5550 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
5551 union tcp_log_stackspecific log;
5553 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5554 log.u_bbr.flex1 = diag->p_nxt_slot;
5555 log.u_bbr.flex2 = diag->p_cur_slot;
5556 log.u_bbr.flex3 = diag->slot_req;
5557 log.u_bbr.flex4 = diag->inp_hptsslot;
5558 log.u_bbr.flex5 = diag->slot_remaining;
5559 log.u_bbr.flex6 = diag->need_new_to;
5560 log.u_bbr.flex7 = diag->p_hpts_active;
5561 log.u_bbr.flex8 = diag->p_on_min_sleep;
5562 /* Hijack other fields as needed */
5563 log.u_bbr.epoch = diag->have_slept;
5564 log.u_bbr.lt_epoch = diag->yet_to_sleep;
5565 log.u_bbr.pkts_out = diag->co_ret;
5566 log.u_bbr.applimited = diag->hpts_sleep_time;
5567 log.u_bbr.delivered = diag->p_prev_slot;
5568 log.u_bbr.inflight = diag->p_runningslot;
5569 log.u_bbr.bw_inuse = diag->wheel_slot;
5570 log.u_bbr.rttProp = diag->wheel_cts;
5571 log.u_bbr.timeStamp = cts;
5572 log.u_bbr.delRate = diag->maxslots;
5573 log.u_bbr.cur_del_rate = diag->p_curtick;
5574 log.u_bbr.cur_del_rate <<= 32;
5575 log.u_bbr.cur_del_rate |= diag->p_lasttick;
5576 TCP_LOG_EVENTP(rack->rc_tp, NULL,
5577 &rack->rc_inp->inp_socket->so_rcv,
5578 &rack->rc_inp->inp_socket->so_snd,
5579 BBR_LOG_HPTSDIAG, 0,
5580 0, &log, false, tv);
5586 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type)
5588 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
5589 union tcp_log_stackspecific log;
5592 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5593 log.u_bbr.flex1 = sb->sb_flags;
5594 log.u_bbr.flex2 = len;
5595 log.u_bbr.flex3 = sb->sb_state;
5596 log.u_bbr.flex8 = type;
5597 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
5598 TCP_LOG_EVENTP(rack->rc_tp, NULL,
5599 &rack->rc_inp->inp_socket->so_rcv,
5600 &rack->rc_inp->inp_socket->so_snd,
5602 len, &log, false, &tv);
5607 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
5608 int32_t slot, uint32_t tot_len_this_send, int sup_rack)
5610 struct hpts_diag diag;
5613 uint32_t delayed_ack = 0;
5614 uint32_t hpts_timeout;
5615 uint32_t entry_slot = slot;
5621 if ((tp->t_state == TCPS_CLOSED) ||
5622 (tp->t_state == TCPS_LISTEN)) {
5625 if (tcp_in_hpts(inp)) {
5626 /* Already on the pacer */
5629 stopped = rack->rc_tmr_stopped;
5630 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
5631 left = rack->r_ctl.rc_timer_exp - cts;
5633 rack->r_ctl.rc_timer_exp = 0;
5634 rack->r_ctl.rc_hpts_flags = 0;
5635 us_cts = tcp_get_usecs(&tv);
5636 /* Now early/late accounting */
5637 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0);
5638 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) {
5640	 * We have an early carry over set,
5641	 * we can always add more time so we
5642	 * can always make this compensation.
5644	 * Note if acks are allowed to wake us, do not
5645	 * penalize the next timer for being awakened
5646	 * by an ack, aka the rc_agg_early (non-paced mode).
5648 slot += rack->r_ctl.rc_agg_early;
5650 rack->r_ctl.rc_agg_early = 0;
5654 * This is harder, we can
5655 * compensate some but it
5656 * really depends on what
5657 * the current pacing time is.
5659 if (rack->r_ctl.rc_agg_delayed >= slot) {
5661 * We can't compensate for it all.
5662 * And we have to have some time
5663 * on the clock. We always have a min
5664 * 10 slots (10 x 10 i.e. 100 usecs).
5666 if (slot <= HPTS_TICKS_PER_SLOT) {
5668 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot);
5669 slot = HPTS_TICKS_PER_SLOT;
5671 /* We take off some */
5672 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT);
5673 slot = HPTS_TICKS_PER_SLOT;
5676 slot -= rack->r_ctl.rc_agg_delayed;
5677 rack->r_ctl.rc_agg_delayed = 0;
5678	 /* Make sure we have 100 microseconds at minimum */
5679 if (slot < HPTS_TICKS_PER_SLOT) {
5680 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot;
5681 slot = HPTS_TICKS_PER_SLOT;
5683 if (rack->r_ctl.rc_agg_delayed == 0)
5688 /* We are pacing too */
5689 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
5691 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack);
5692 #ifdef NETFLIX_EXP_DETECTION
5693 if (rack->sack_attack_disable &&
5694 (slot < tcp_sad_pacing_interval)) {
5696	 * We have a potential attacker on
5697	 * the line. We have possibly some
5698	 * (or no) pacing time set. We want to
5699	 * slow down the processing of sacks by some
5700	 * amount (if it is an attacker). Set the default
5701	 * slot for attackers in place (unless the original
5702	 * interval is longer). It's stored in
5703	 * micro-seconds, so let's convert to msecs.
5705 slot = tcp_sad_pacing_interval;
5708 if (tp->t_flags & TF_DELACK) {
5709 delayed_ack = TICKS_2_USEC(tcp_delacktime);
5710 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK;
5712 if (delayed_ack && ((hpts_timeout == 0) ||
5713 (delayed_ack < hpts_timeout)))
5714 hpts_timeout = delayed_ack;
5716 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
5718 * If no timers are going to run and we will fall off the hptsi
5719	 * wheel, we resort to a keep-alive timer if it's configured.
5721 if ((hpts_timeout == 0) &&
5723 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
5724 (tp->t_state <= TCPS_CLOSING)) {
5726 * Ok we have no timer (persists, rack, tlp, rxt or
5727 * del-ack), we don't have segments being paced. So
5728 * all that is left is the keepalive timer.
5730 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
5731 /* Get the established keep-alive time */
5732 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp));
5735 * Get the initial setup keep-alive time,
5736 * note that this is probably not going to
5737	 * happen, since rack will be running an rxt timer
5738 * if a SYN of some sort is outstanding. It is
5739 * actually handled in rack_timeout_rxt().
5741 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp));
5743 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP;
5744 if (rack->in_probe_rtt) {
5746 * We want to instead not wake up a long time from
5747 * now but to wake up about the time we would
5748 * exit probe-rtt and initiate a keep-alive ack.
5749 * This will get us out of probe-rtt and update
5752 hpts_timeout = rack_min_probertt_hold;
5756 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) ==
5757 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) {
5759	 * RACK, TLP, persists and RXT timers are all restartable
5760	 * based on input actions, i.e. we received a packet (ack
5761	 * or sack) and that changes things (rwnd, or snd_una etc).
5762	 * Thus we can restart them with a new value. For
5763	 * keep-alive and delayed_ack we keep track of what was left
5764	 * and restart the timer with a smaller value.
5766 if (left < hpts_timeout)
5767 hpts_timeout = left;
5771 * Hack alert for now we can't time-out over 2,147,483
5772 * seconds (a bit more than 596 hours), which is probably ok
5775 if (hpts_timeout > 0x7ffffffe)
5776 hpts_timeout = 0x7ffffffe;
5777 rack->r_ctl.rc_timer_exp = cts + hpts_timeout;
5779 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0);
5780 if ((rack->gp_ready == 0) &&
5781 (rack->use_fixed_rate == 0) &&
5782 (hpts_timeout < slot) &&
5783 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) {
5785 * We have no good estimate yet for the
5786 * old clunky burst mitigation or the
5787 * real pacing. And the tlp or rxt is smaller
5788	 * than the pacing calculation. Let's not
5789 * pace that long since we know the calculation
5790 * so far is not accurate.
5792 slot = hpts_timeout;
5795 * Turn off all the flags for queuing by default. The
5796	 * flags have important meanings for what happens when
5797 * LRO interacts with the transport. Most likely (by default now)
5798 * mbuf_queueing and ack compression are on. So the transport
5799 * has a couple of flags that control what happens (if those
5800 * are not on then these flags won't have any effect since it
5801 * won't go through the queuing LRO path).
5803	 * INP_MBUF_QUEUE_READY - This flag says that I am busy
5804 * pacing output, so don't disturb. But
5805 * it also means LRO can wake me if there
5806 * is a SACK arrival.
5808 * INP_DONT_SACK_QUEUE - This flag is used in conjunction
5809 * with the above flag (QUEUE_READY) and
5810 * when present it says don't even wake me
5811 * if a SACK arrives.
5813 * The idea behind these flags is that if we are pacing we
5814 * set the MBUF_QUEUE_READY and only get woken up if
5815 * a SACK arrives (which could change things) or if
5816 * our pacing timer expires. If, however, we have a rack
5817 * timer running, then we don't even want a sack to wake
5818 * us since the rack timer has to expire before we can send.
5820 * Other cases should usually have none of the flags set
5821 * so LRO can call into us.
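/*
 * A rough summary of the cases handled below (illustrative only):
 *   pacing timer set, no rack timer        -> INP_MBUF_QUEUE_READY
 *   pacing + rack timer, r_rr_config != 3  -> QUEUE_READY | DONT_SACK_QUEUE
 *   an ack is allowed to send out data     -> both flags cleared again
 *   only an hpts timeout (no pacing)       -> neither flag set
 */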
5823 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
5825 rack->r_ctl.rc_last_output_to = us_cts + slot;
5827 * A pacing timer (slot) is being set, in
5828 * such a case we cannot send (we are blocked by
5829	 * the timer). So let's tell LRO that it should not
5830 * wake us unless there is a SACK. Note this only
5831 * will be effective if mbuf queueing is on or
5832 * compressed acks are being processed.
5834 inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
5836 * But wait if we have a Rack timer running
5837 * even a SACK should not disturb us (with
5838 * the exception of r_rr_config 3).
5840 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) &&
5841 (rack->r_rr_config != 3))
5842 inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
5843 if (rack->rc_ack_can_sendout_data) {
5845 * Ahh but wait, this is that special case
5846	 * where the pacing timer can be disturbed;
5847	 * back out the changes (used for non-paced
5850 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
5852 if ((rack->use_rack_rr) &&
5853 (rack->r_rr_config < 2) &&
5854 ((hpts_timeout) && (hpts_timeout < slot))) {
5856 * Arrange for the hpts to kick back in after the
5857 * t-o if the t-o does not cause a send.
5859 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout),
5861 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5862 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
5864 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot),
5866 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5867 rack_log_to_start(rack, cts, hpts_timeout, slot, 1);
5869 } else if (hpts_timeout) {
5871	 * With respect to inp_flags2 here, let's let any new acks wake
5872	 * us up. Since we are not pacing (no pacing timer), output
5873	 * can happen so we should let it. If it's a Rack timer, then any inbound
5874	 * packet probably won't change the sending (we will be blocked)
5875	 * but it may change the prr stats, so letting it in (the defaults set
5876	 * at the start of this block) is good enough.
5878 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout),
5880 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5881 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
5883 /* No timer starting */
5885 if (SEQ_GT(tp->snd_max, tp->snd_una)) {
5886 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?",
5887 tp, rack, tot_len_this_send, cts, slot, hpts_timeout);
5891 rack->rc_tmr_stopped = 0;
5893 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv);
5897	 * RACK Timer, here we simply do logging and housekeeping.
5898	 * The normal rack_output() function will call the
5899	 * appropriate thing to check if we need to do a RACK retransmit.
5900	 * We return 1, saying don't proceed with rack_output, only
5901	 * when all timers have been stopped (destroyed PCB?).
5904 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5907 * This timer simply provides an internal trigger to send out data.
5908 * The check_recovery_mode call will see if there are needed
5909 * retransmissions, if so we will enter fast-recovery. The output
5910 * call may or may not do the same thing depending on sysctl
5913 struct rack_sendmap *rsm;
5915 if (tp->t_timers->tt_flags & TT_STOPPED) {
5918 counter_u64_add(rack_to_tot, 1);
5919 if (rack->r_state && (rack->r_state != tp->t_state))
5920 rack_set_state(tp, rack);
5921 rack->rc_on_min_to = 0;
5922 rsm = rack_check_recovery_mode(tp, cts);
5923 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm);
5925 rack->r_ctl.rc_resend = rsm;
5926 rack->r_timer_override = 1;
5927 if (rack->use_rack_rr) {
5929	 * Don't accumulate extra pacing delay;
5930	 * we are allowing the rack timer to
5931	 * override pacing, i.e. rrr takes precedence
5932	 * if the pacing interval is longer than the rrr
5933	 * time (in other words we get the min of the pacing
5934	 * time versus the rrr pacing time).
5936 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
5939 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK;
5941 /* restart a timer and return 1 */
5942 rack_start_hpts_timer(rack, tp, cts,
5950 rack_adjust_orig_mlen(struct rack_sendmap *rsm)
5952 if (rsm->m->m_len > rsm->orig_m_len) {
5954 * Mbuf grew, caused by sbcompress, our offset does
5957 rsm->orig_m_len = rsm->m->m_len;
5958 } else if (rsm->m->m_len < rsm->orig_m_len) {
5960 * Mbuf shrank, trimmed off the top by an ack, our
5963 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len);
5964 rsm->orig_m_len = rsm->m->m_len;
5969 rack_setup_offset_for_rsm(struct rack_sendmap *src_rsm, struct rack_sendmap *rsm)
5974 if (src_rsm->m && (src_rsm->orig_m_len != src_rsm->m->m_len)) {
5975 /* Fix up the orig_m_len and possibly the mbuf offset */
5976 rack_adjust_orig_mlen(src_rsm);
5979 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start);
5980 while (soff >= m->m_len) {
5981 /* Move out past this mbuf */
5984 KASSERT((m != NULL),
5985 ("rsm:%p nrsm:%p hit at soff:%u null m",
5986 src_rsm, rsm, soff));
5990 rsm->orig_m_len = m->m_len;
5993 static __inline void
5994 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm,
5995 struct rack_sendmap *rsm, uint32_t start)
5999 nrsm->r_start = start;
6000 nrsm->r_end = rsm->r_end;
6001 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
6002 nrsm->r_flags = rsm->r_flags;
6003 nrsm->r_dupack = rsm->r_dupack;
6004 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed;
6005 nrsm->r_rtr_bytes = 0;
6006 nrsm->r_fas = rsm->r_fas;
6007 rsm->r_end = nrsm->r_start;
6008 nrsm->r_just_ret = rsm->r_just_ret;
6009 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
6010 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
6012 /* Now if we have SYN flag we keep it on the left edge */
6013 if (nrsm->r_flags & RACK_HAS_SYN)
6014 nrsm->r_flags &= ~RACK_HAS_SYN;
6015 /* Now if we have a FIN flag we keep it on the right edge */
6016 if (rsm->r_flags & RACK_HAS_FIN)
6017 rsm->r_flags &= ~RACK_HAS_FIN;
6018 /* Push bit must go to the right edge as well */
6019 if (rsm->r_flags & RACK_HAD_PUSH)
6020 rsm->r_flags &= ~RACK_HAD_PUSH;
6021 /* Clone over the state of the hw_tls flag */
6022 nrsm->r_hw_tls = rsm->r_hw_tls;
6024	 * Now we need to find nrsm's new location in the mbuf chain.
6025	 * We basically calculate a new offset, which is soff +
6026	 * how much is left in the original rsm. Then we walk out the mbuf
6027	 * chain to find the right position, it may be the same mbuf
6030 KASSERT(((rsm->m != NULL) ||
6031 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))),
6032 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack));
6034 rack_setup_offset_for_rsm(rsm, nrsm);
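/*
 * An illustrative example (assumed sequence numbers): cloning an rsm
 * covering [1000, 4000) at start = 2000 leaves the original rsm as
 * [1000, 2000) and the new nrsm as [2000, 4000); any SYN flag stays on
 * the left edge (rsm) while FIN and PUSH flags travel with the right
 * edge (nrsm), exactly as the flag shuffling above arranges.
 */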
6037 static struct rack_sendmap *
6038 rack_merge_rsm(struct tcp_rack *rack,
6039 struct rack_sendmap *l_rsm,
6040 struct rack_sendmap *r_rsm)
6043 * We are merging two ack'd RSM's,
6044 * the l_rsm is on the left (lower seq
6045 * values) and the r_rsm is on the right
6046 * (higher seq value). The simplest way
6047 * to merge these is to move the right
6048 * one into the left. I don't think there
6049 * is any reason we need to try to find
6050 * the oldest (or last oldest retransmitted).
6053 struct rack_sendmap *rm;
6055 rack_log_map_chg(rack->rc_tp, rack, NULL,
6056 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__);
6057 l_rsm->r_end = r_rsm->r_end;
6058 if (l_rsm->r_dupack < r_rsm->r_dupack)
6059 l_rsm->r_dupack = r_rsm->r_dupack;
6060 if (r_rsm->r_rtr_bytes)
6061 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes;
6062 if (r_rsm->r_in_tmap) {
6063 /* This really should not happen */
6064 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext);
6065 r_rsm->r_in_tmap = 0;
6069 if (r_rsm->r_flags & RACK_HAS_FIN)
6070 l_rsm->r_flags |= RACK_HAS_FIN;
6071 if (r_rsm->r_flags & RACK_TLP)
6072 l_rsm->r_flags |= RACK_TLP;
6073 if (r_rsm->r_flags & RACK_RWND_COLLAPSED)
6074 l_rsm->r_flags |= RACK_RWND_COLLAPSED;
6075 if ((r_rsm->r_flags & RACK_APP_LIMITED) &&
6076 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) {
6078 * If both are app-limited then let the
6079 * free lower the count. If right is app
6080 * limited and left is not, transfer.
6082 l_rsm->r_flags |= RACK_APP_LIMITED;
6083 r_rsm->r_flags &= ~RACK_APP_LIMITED;
6084 if (r_rsm == rack->r_ctl.rc_first_appl)
6085 rack->r_ctl.rc_first_appl = l_rsm;
6088 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm);
6090 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm);
6092 panic("removing head in rack:%p rsm:%p rm:%p",
6096 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) {
6097 /* Transfer the split limit to the map we free */
6098 r_rsm->r_limit_type = l_rsm->r_limit_type;
6099 l_rsm->r_limit_type = 0;
6101 rack_free(rack, r_rsm);
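/*
 * An illustrative example (assumed sequence numbers): merging l_rsm
 * [1000, 2000) with r_rsm [2000, 3000) simply extends l_rsm to
 * [1000, 3000), folds r_rsm's dupack/retransmit accounting and its
 * FIN/TLP/app-limited flags into l_rsm, and then frees r_rsm.
 */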
6106	 * TLP Timer, here we simply set up what segment we want to
6107 * have the TLP expire on, the normal rack_output() will then
6110 * We return 1, saying don't proceed with rack_output only
6111 * when all timers have been stopped (destroyed PCB?).
6114 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp)
6119 struct rack_sendmap *rsm = NULL;
6121 struct rack_sendmap *insret;
6125 uint32_t out, avail;
6126 int collapsed_win = 0;
6128 if (tp->t_timers->tt_flags & TT_STOPPED) {
6131 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
6132 /* Its not time yet */
6135 if (ctf_progress_timeout_check(tp, true)) {
6136 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
6137 return (-ETIMEDOUT); /* tcp_drop() */
6140 * A TLP timer has expired. We have been idle for 2 rtts. So we now
6141 * need to figure out how to force a full MSS segment out.
6143 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL);
6144 rack->r_ctl.retran_during_recovery = 0;
6145 rack->r_ctl.dsack_byte_cnt = 0;
6146 counter_u64_add(rack_tlp_tot, 1);
6147 if (rack->r_state && (rack->r_state != tp->t_state))
6148 rack_set_state(tp, rack);
6149 so = tp->t_inpcb->inp_socket;
6150 avail = sbavail(&so->so_snd);
6151 out = tp->snd_max - tp->snd_una;
6152 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) {
6153 /* special case, we need a retransmission */
6157 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) {
6158 rack->r_ctl.dsack_persist--;
6159 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
6160 rack->r_ctl.num_dsack = 0;
6162 rack_log_dsack_event(rack, 1, __LINE__, 0, 0);
6164 if ((tp->t_flags & TF_GPUTINPROG) &&
6165 (rack->r_ctl.rc_tlp_cnt_out == 1)) {
6167	 * If this is the second in a row
6168	 * TLP and we are doing a measurement,
6169	 * it's time to abandon the measurement.
6170	 * Something is likely broken on
6171	 * the client's network and measuring a
6172 * broken network does us no good.
6174 tp->t_flags &= ~TF_GPUTINPROG;
6175 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
6176 rack->r_ctl.rc_gp_srtt /*flex1*/,
6178 0, 0, 18, __LINE__, NULL, 0);
6181 * Check our send oldest always settings, and if
6182 * there is an oldest to send jump to the need_retran.
6184 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0))
6188 /* New data is available */
6190 if (amm > ctf_fixed_maxseg(tp)) {
6191 amm = ctf_fixed_maxseg(tp);
6192 if ((amm + out) > tp->snd_wnd) {
6193 /* We are rwnd limited */
6196 } else if (amm < ctf_fixed_maxseg(tp)) {
6197 /* not enough to fill a MTU */
6200 if (IN_FASTRECOVERY(tp->t_flags)) {
6202 if (rack->rack_no_prr == 0) {
6203 if (out + amm <= tp->snd_wnd) {
6204 rack->r_ctl.rc_prr_sndcnt = amm;
6205 rack->r_ctl.rc_tlp_new_data = amm;
6206 rack_log_to_prr(rack, 4, 0, __LINE__);
6211 /* Set the send-new override */
6212 if (out + amm <= tp->snd_wnd)
6213 rack->r_ctl.rc_tlp_new_data = amm;
6217 rack->r_ctl.rc_tlpsend = NULL;
6218 counter_u64_add(rack_tlp_newdata, 1);
6223 * Ok we need to arrange the last un-acked segment to be re-sent, or
6224 * optionally the first un-acked segment.
6226 if (collapsed_win == 0) {
6227 if (rack_always_send_oldest)
6228 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6230 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6231 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) {
6232 rsm = rack_find_high_nonack(rack, rsm);
6237 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
6243 * We must find the last segment
6244	 * that was acceptable to the client.
6246 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
6247 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) {
6253 /* None? if so send the first */
6254 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6257 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
6263 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) {
6265	 * We need to split this, the last segment, in two.
6267 struct rack_sendmap *nrsm;
6269 nrsm = rack_alloc_full_limit(rack);
6272 * No memory to split, we will just exit and punt
6273 * off to the RXT timer.
6277 rack_clone_rsm(rack, nrsm, rsm,
6278 (rsm->r_end - ctf_fixed_maxseg(tp)));
6279 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
6281 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
6283 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
6284 if (insret != NULL) {
6285 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
6286 nrsm, insret, rack, rsm);
6289 if (rsm->r_in_tmap) {
6290 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
6291 nrsm->r_in_tmap = 1;
6295 rack->r_ctl.rc_tlpsend = rsm;
6297 /* Make sure output path knows we are doing a TLP */
6299 rack->r_timer_override = 1;
6300 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
6303 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
6308	 * Delayed ack Timer, here we simply need to set the
6309 * ACK_NOW flag and remove the DELACK flag. From there
6310 * the output routine will send the ack out.
6312 * We only return 1, saying don't proceed, if all timers
6313 * are stopped (destroyed PCB?).
6316 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6318 if (tp->t_timers->tt_flags & TT_STOPPED) {
6321 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL);
6322 tp->t_flags &= ~TF_DELACK;
6323 tp->t_flags |= TF_ACKNOW;
6324 KMOD_TCPSTAT_INC(tcps_delack);
6325 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
6330	 * Persists timer, here we simply send the
6331	 * same thing as a keepalive would:
6332	 * the one byte send.
6334 * We only return 1, saying don't proceed, if all timers
6335 * are stopped (destroyed PCB?).
6338 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6340 struct tcptemp *t_template;
6342 struct inpcb *inp = tp->t_inpcb;
6346 if (tp->t_timers->tt_flags & TT_STOPPED) {
6349 if (rack->rc_in_persist == 0)
6351 if (ctf_progress_timeout_check(tp, false)) {
6352 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
6353 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
6354 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
6355 return (-ETIMEDOUT); /* tcp_drop() */
6357 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
6359 * Persistence timer into zero window. Force a byte to be output, if
6362 KMOD_TCPSTAT_INC(tcps_persisttimeo);
6364 * Hack: if the peer is dead/unreachable, we do not time out if the
6365 * window is closed. After a full backoff, drop the connection if
6366 * the idle time (no responses to probes) reaches the maximum
6367 * backoff that we would use if retransmitting.
6369 if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
6370 (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
6371 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) {
6372 KMOD_TCPSTAT_INC(tcps_persistdrop);
6373 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
6374 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
6375 retval = -ETIMEDOUT; /* tcp_drop() */
6378 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) &&
6379 tp->snd_una == tp->snd_max)
6380 rack_exit_persist(tp, rack, cts);
6381 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT;
6383 * If the user has closed the socket then drop a persisting
6384 * connection after a much reduced timeout.
6386 if (tp->t_state > TCPS_CLOSE_WAIT &&
6387 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
6388 KMOD_TCPSTAT_INC(tcps_persistdrop);
6389 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
6390 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
6391 retval = -ETIMEDOUT; /* tcp_drop() */
6394 t_template = tcpip_maketemplate(rack->rc_inp);
6396 /* only set it if we were answered */
6397 if (rack->forced_ack == 0) {
6398 rack->forced_ack = 1;
6399 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
6401 rack->probe_not_answered = 1;
6402 counter_u64_add(rack_persists_loss, 1);
6403 rack->r_ctl.persist_lost_ends++;
6405 counter_u64_add(rack_persists_sends, 1);
6406 tcp_respond(tp, t_template->tt_ipgen,
6407 &t_template->tt_t, (struct mbuf *)NULL,
6408 tp->rcv_nxt, tp->snd_una - 1, 0);
6409 /* This sends an ack */
6410 if (tp->t_flags & TF_DELACK)
6411 tp->t_flags &= ~TF_DELACK;
6412 free(t_template, M_TEMP);
6414 if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
6417 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL);
6418 rack_start_hpts_timer(rack, tp, cts,
6424 * If a keepalive goes off, we had no other timers
6425 * happening. We always return 1 here since this
6426 * routine either drops the connection or sends
6427	 * out a segment with tcp_respond().
6430 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6432 struct tcptemp *t_template;
6435 if (tp->t_timers->tt_flags & TT_STOPPED) {
6438 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
6440 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL);
6442 * Keep-alive timer went off; send something or drop connection if
6443 * idle for too long.
6445 KMOD_TCPSTAT_INC(tcps_keeptimeo);
6446 if (tp->t_state < TCPS_ESTABLISHED)
6448 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
6449 tp->t_state <= TCPS_CLOSING) {
6450 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
6453 * Send a packet designed to force a response if the peer is
6454 * up and reachable: either an ACK if the connection is
6455 * still alive, or an RST if the peer has closed the
6456 * connection due to timeout or reboot. Using sequence
6457 * number tp->snd_una-1 causes the transmitted zero-length
6458 * segment to lie outside the receive window; by the
6459 * protocol spec, this requires the correspondent TCP to
6462 KMOD_TCPSTAT_INC(tcps_keepprobe);
6463 t_template = tcpip_maketemplate(inp);
6465 if (rack->forced_ack == 0) {
6466 rack->forced_ack = 1;
6467 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
6469 rack->probe_not_answered = 1;
6471 tcp_respond(tp, t_template->tt_ipgen,
6472 &t_template->tt_t, (struct mbuf *)NULL,
6473 tp->rcv_nxt, tp->snd_una - 1, 0);
6474 free(t_template, M_TEMP);
6477 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
6480 KMOD_TCPSTAT_INC(tcps_keepdrops);
6481 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
6482 return (-ETIMEDOUT); /* tcp_drop() */
6486 * Retransmit helper function, clear up all the ack
6487	 * flags and take care of important bookkeeping.
6490 rack_remxt_tmr(struct tcpcb *tp)
6493 * The retransmit timer went off, all sack'd blocks must be
6496 struct rack_sendmap *rsm, *trsm = NULL;
6497 struct tcp_rack *rack;
6499 rack = (struct tcp_rack *)tp->t_fb_ptr;
6500 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__);
6501 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL);
6502 if (rack->r_state && (rack->r_state != tp->t_state))
6503 rack_set_state(tp, rack);
6505 * Ideally we would like to be able to
6506 * mark SACK-PASS on anything not acked here.
6508 * However, if we do that we would burst out
6509 * all that data 1ms apart. This would be unwise,
6510 * so for now we will just let the normal rxt timer
6511 * and tlp timer take care of it.
6513 * Also we really need to stick them back in sequence
6514 * order. This way we send in the proper order and any
6515 * sacks that come floating in will "re-ack" the data.
6516 * To do this we zap the tmap with an INIT and then
6517 * walk through and place every rsm in the RB tree
6518 * back in its seq ordered place.
6520 TAILQ_INIT(&rack->r_ctl.rc_tmap);
6521 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
6523 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
6524 /* We must re-add it back to the tlist */
6526 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
6528 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
6532 if (rsm->r_flags & RACK_ACKED)
6533 rsm->r_flags |= RACK_WAS_ACKED;
6534 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED);
6535 rsm->r_flags |= RACK_MUST_RXT;
6537 /* Clear the count (we just un-acked them) */
6538 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una;
6539 rack->r_ctl.rc_sacked = 0;
6540 rack->r_ctl.rc_sacklast = NULL;
6541 rack->r_ctl.rc_agg_delayed = 0;
6543 rack->r_ctl.rc_agg_early = 0;
6545 /* Clear the tlp rtx mark */
6546 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6547 if (rack->r_ctl.rc_resend != NULL)
6548 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT;
6549 rack->r_ctl.rc_prr_sndcnt = 0;
6550 rack_log_to_prr(rack, 6, 0, __LINE__);
6551 rack->r_timer_override = 1;
6552 if ((((tp->t_flags & TF_SACK_PERMIT) == 0)
6553 #ifdef NETFLIX_EXP_DETECTION
6554 || (rack->sack_attack_disable != 0)
6556 ) && ((tp->t_flags & TF_SENTFIN) == 0)) {
6558 * For non-sack customers new data
6559 * needs to go out as retransmits until
6560 * we retransmit up to snd_max.
6562 rack->r_must_retran = 1;
6563 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp,
6564 rack->r_ctl.rc_sacked);
6566 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
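/*
 * Illustrative summary: at this point the timestamp-ordered tmap has
 * been rebuilt as a fresh, sequence-ordered copy of the RB tree, every
 * block has had its ACKED/SACK-PASSED state cleared and RACK_MUST_RXT
 * set, and (for non-SACK peers) r_must_retran forces everything up to
 * rc_snd_max_at_rto back out as retransmits.
 */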
6570 rack_convert_rtts(struct tcpcb *tp)
6572 if (tp->t_srtt > 1) {
6575 val = tp->t_srtt >> TCP_RTT_SHIFT;
6576 frac = tp->t_srtt & 0x1f;
6577 tp->t_srtt = TICKS_2_USEC(val);
6579 * frac is the fractional part of the srtt (if any)
6580	 * but it's in ticks and every bit represents
6585 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE);
6587 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE));
6595 val = tp->t_rttvar >> TCP_RTTVAR_SHIFT;
6596 frac = tp->t_rttvar & 0x1f;
6597 tp->t_rttvar = TICKS_2_USEC(val);
6599	 * frac is the fractional part of the rttvar (if any)
6600	 * but it's in ticks and every bit represents
6605 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE);
6607 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE));
6609 tp->t_rttvar += frac;
6612 tp->t_rxtcur = RACK_REXMTVAL(tp);
6613 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
6614 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop);
6616 if (tp->t_rxtcur > rack_rto_max) {
6617 tp->t_rxtcur = rack_rto_max;
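/*
 * A worked example (assumed values, hz = 1000): a t_srtt of 1280,
 * i.e. 40 ticks << TCP_RTT_SHIFT with frac = 0, converts to
 * TICKS_2_USEC(40) = 40000 usecs; a leftover frac of 16 (half a tick)
 * would add 16 * 1000 / 32 = 500 usecs.  t_rxtcur is then rebuilt from
 * the converted values and clamped to rack_rto_max as above.
 */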
6622 rack_cc_conn_init(struct tcpcb *tp)
6624 struct tcp_rack *rack;
6627 rack = (struct tcp_rack *)tp->t_fb_ptr;
6631 * Now convert to rack's internal format,
6634 if ((srtt == 0) && (tp->t_srtt != 0))
6635 rack_convert_rtts(tp);
6637 * We want a chance to stay in slowstart as
6638 * we create a connection. TCP spec says that
6639 * initially ssthresh is infinite. For our
6640 * purposes that is the snd_wnd.
6642 if (tp->snd_ssthresh < tp->snd_wnd) {
6643 tp->snd_ssthresh = tp->snd_wnd;
6646	 * We also want to assure an IW worth of
6647	 * data can get in flight.
6649 if (rc_init_window(rack) < tp->snd_cwnd)
6650 tp->snd_cwnd = rc_init_window(rack);
6654 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
6655	 * we will set up to retransmit the lowest seq number outstanding.
6658 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6664 if (tp->t_timers->tt_flags & TT_STOPPED) {
6667 if ((tp->t_flags & TF_GPUTINPROG) &&
6670	 * We have had a second timeout;
6671	 * measurements on successive rxt's are not profitable.
6672 * It is unlikely to be of any use (the network is
6673 * broken or the client went away).
6675 tp->t_flags &= ~TF_GPUTINPROG;
6676 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
6677 rack->r_ctl.rc_gp_srtt /*flex1*/,
6679 0, 0, 18, __LINE__, NULL, 0);
6681 if (ctf_progress_timeout_check(tp, false)) {
6682 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
6683 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
6684 return (-ETIMEDOUT); /* tcp_drop() */
6686 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT;
6687 rack->r_ctl.retran_during_recovery = 0;
6688 rack->rc_ack_required = 1;
6689 rack->r_ctl.dsack_byte_cnt = 0;
6690 if (IN_FASTRECOVERY(tp->t_flags))
6691 tp->t_flags |= TF_WASFRECOVERY;
6693 tp->t_flags &= ~TF_WASFRECOVERY;
6694 if (IN_CONGRECOVERY(tp->t_flags))
6695 tp->t_flags |= TF_WASCRECOVERY;
6697 tp->t_flags &= ~TF_WASCRECOVERY;
6698 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
6699 (tp->snd_una == tp->snd_max)) {
6700 /* Nothing outstanding .. nothing to do */
6703 if (rack->r_ctl.dsack_persist) {
6704 rack->r_ctl.dsack_persist--;
6705 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
6706 rack->r_ctl.num_dsack = 0;
6708 rack_log_dsack_event(rack, 1, __LINE__, 0, 0);
6711 * Rack can only run one timer at a time, so we cannot
6712 * run a KEEPINIT (gating SYN sending) and a retransmit
6713 * timer for the SYN. So if we are in a front state and
6714 * have a KEEPINIT timer we need to check the first transmit
6715 * against now to see if we have exceeded the KEEPINIT time
6718 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
6719 (TP_KEEPINIT(tp) != 0)) {
6720 struct rack_sendmap *rsm;
6722 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6724 /* Ok we have something outstanding to test keepinit with */
6725 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) &&
6726 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) {
6727 /* We have exceeded the KEEPINIT time */
6728 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
6734 * Retransmission timer went off. Message has not been acked within
6735 * retransmit interval. Back off to a longer retransmit interval
6736 * and retransmit one segment.
6739 if ((rack->r_ctl.rc_resend == NULL) ||
6740 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) {
6742 * If the rwnd collapsed on
6743 * the one we are retransmitting
6744 * it does not count against the
6749 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) {
6750 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
6752 tp->t_rxtshift = TCP_MAXRXTSHIFT;
6753 KMOD_TCPSTAT_INC(tcps_timeoutdrop);
6754 /* XXXGL: previously t_softerror was casted to uint16_t */
6755 MPASS(tp->t_softerror >= 0);
6756 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT;
6757 goto out; /* tcp_drop() */
6759 if (tp->t_state == TCPS_SYN_SENT) {
6761 * If the SYN was retransmitted, indicate CWND to be limited
6762 * to 1 segment in cc_conn_init().
6765 } else if (tp->t_rxtshift == 1) {
6767 * first retransmit; record ssthresh and cwnd so they can be
6768 * recovered if this turns out to be a "bad" retransmit. A
6769 * retransmit is considered "bad" if an ACK for this segment
6770 * is received within RTT/2 interval; the assumption here is
6771 * that the ACK was already in flight. See "On Estimating
6772 * End-to-End Network Path Properties" by Allman and Paxson
6775 tp->snd_cwnd_prev = tp->snd_cwnd;
6776 tp->snd_ssthresh_prev = tp->snd_ssthresh;
6777 tp->snd_recover_prev = tp->snd_recover;
6778 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2);
6779 tp->t_flags |= TF_PREVVALID;
6780 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0)
6781 tp->t_flags &= ~TF_PREVVALID;
6782 KMOD_TCPSTAT_INC(tcps_rexmttimeo);
6783 if ((tp->t_state == TCPS_SYN_SENT) ||
6784 (tp->t_state == TCPS_SYN_RECEIVED))
6785 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift];
6787 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift];
6789 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt,
6790 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop);
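/*
 * A worked example (assumed values): with t_srtt = 50000 usecs,
 * t_rttvar = 10000 usecs and t_rxtshift = 3 on an established
 * connection,
 *     rexmt = max(rack_rto_min, 50000 + (10000 << 2)) * tcp_backoff[3]
 *           = 90000 * 8 = 720000 usecs
 * which RACK_TCPT_RANGESET() then clamps into
 * [max(rack_rto_min, rexmt), rack_rto_max] plus the timer slop.
 */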
6792	 * We enter the path for PLMTUD if the connection is established or
6793	 * in FIN_WAIT_1 status; the reason for the latter is that if the
6794	 * amount of data we send is very small, we could send it in a couple
6795	 * of packets and proceed straight to FIN. In that case we won't
6796	 * catch the ESTABLISHED state.
6799 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false;
6803 if (((V_tcp_pmtud_blackhole_detect == 1) ||
6804 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) ||
6805 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) &&
6806 ((tp->t_state == TCPS_ESTABLISHED) ||
6807 (tp->t_state == TCPS_FIN_WAIT_1))) {
6809	 * The idea here is that each stage of the mtu probe (usually
6810	 * 1448 -> 1188 -> 524) should be given 2 chances to recover
6811	 * before further clamping down. 'tp->t_rxtshift % 2 == 0'
6812	 * should take care of that.
6814 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) ==
6815 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) &&
6816 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
6817 tp->t_rxtshift % 2 == 0)) {
6819 * Enter Path MTU Black-hole Detection mechanism: -
6820 * Disable Path MTU Discovery (IP "DF" bit). -
6821 * Reduce MTU to lower value than what we negotiated
6824 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
6825 /* Record that we may have found a black hole. */
6826 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
6827 /* Keep track of previous MSS. */
6828 tp->t_pmtud_saved_maxseg = tp->t_maxseg;
6832 * Reduce the MSS to blackhole value or to the
6833 * default in an attempt to retransmit.
6837 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
6838 /* Use the sysctl tuneable blackhole MSS. */
6839 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
6840 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
6841 } else if (isipv6) {
6842 /* Use the default MSS. */
6843 tp->t_maxseg = V_tcp_v6mssdflt;
6845 * Disable Path MTU Discovery when we switch
6848 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
6849 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
6852 #if defined(INET6) && defined(INET)
6856 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
6857 /* Use the sysctl tuneable blackhole MSS. */
6858 tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
6859 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
6861 /* Use the default MSS. */
6862 tp->t_maxseg = V_tcp_mssdflt;
6864 * Disable Path MTU Discovery when we switch
6867 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
6868 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
6873 * If further retransmissions are still unsuccessful
6874 * with a lowered MTU, maybe this isn't a blackhole
6875 * and we restore the previous MSS and blackhole
6876 * detection flags. The limit '6' is determined by
6877 * giving each probe stage (1448, 1188, 524) 2
6878 * chances to recover.
6880 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
6881 (tp->t_rxtshift >= 6)) {
6882 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
6883 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
6884 tp->t_maxseg = tp->t_pmtud_saved_maxseg;
6885 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed);
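/*
 * Illustrative timeline (assumed MTUs): with the usual probe ladder a
 * connection clamps t_maxseg to the blackhole MSS at t_rxtshift == 2,
 * to the default (minimum) MSS at t_rxtshift == 4, and once
 * t_rxtshift reaches 6 the saved t_pmtud_saved_maxseg is restored and
 * PMTUD re-enabled, i.e. each stage gets the two chances noted above.
 */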
6890 * Disable RFC1323 and SACK if we haven't got any response to
6891	 * our third SYN to work around some broken terminal servers
6892 * (most of which have hopefully been retired) that have bad VJ
6893 * header compression code which trashes TCP segments containing
6894 * unknown-to-them TCP options.
6896 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
6897 (tp->t_rxtshift == 3))
6898 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
6900 * If we backed off this far, our srtt estimate is probably bogus.
6901 * Clobber it so we'll take the next rtt measurement as our srtt;
6902 * move the current srtt into rttvar to keep the current retransmit
6905 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
6907 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
6908 in6_losing(tp->t_inpcb);
6911 in_losing(tp->t_inpcb);
6912 tp->t_rttvar += tp->t_srtt;
6915 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
6916 tp->snd_recover = tp->snd_max;
6917 tp->t_flags |= TF_ACKNOW;
6919 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__);
6925 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp)
6928 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK);
6930 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
6931 (tp->t_flags & TF_GPUTINPROG)) {
6933 * We have a goodput in progress
6934 * and we have entered a late state.
6935 * Do we have enough data in the sb
6936 * to handle the GPUT request?
6940 bytes = tp->gput_ack - tp->gput_seq;
6941 if (SEQ_GT(tp->gput_seq, tp->snd_una))
6942 bytes += tp->gput_seq - tp->snd_una;
6943 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) {
6945 * There are not enough bytes in the socket
6946 * buffer that have been sent to cover this
6947 * measurement. Cancel it.
6949 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
6950 rack->r_ctl.rc_gp_srtt /*flex1*/,
6952 0, 0, 18, __LINE__, NULL, 0);
6953 tp->t_flags &= ~TF_GPUTINPROG;
6959 if (tp->t_state == TCPS_LISTEN) {
6960 /* no timers on listen sockets */
6961 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)
6965 if ((timers & PACE_TMR_RACK) &&
6966 rack->rc_on_min_to) {
6968	 * For the rack timer, when we
6969	 * are on a min-timeout (which means rrr_conf = 3)
6970	 * we don't want to check the timer. It may
6971	 * be going off for a pace and that's ok; we
6972	 * want to send the retransmit (if it's ready).
6974	 * If it's on a normal rack timer (non-min) then
6975	 * we will check if it's expired.
6977 goto skip_time_check;
6979 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
6982 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
6984 rack_log_to_processing(rack, cts, ret, 0);
6987 if (hpts_calling == 0) {
6989 * A user send or queued mbuf (sack) has called us? We
6990 * return 0 and let the pacing guards
6991 * deal with it if they should or
6992 * should not cause a send.
6995 rack_log_to_processing(rack, cts, ret, 0);
6999	 * Ok, our timer went off early and we are not paced: false
7000	 * alarm, go back to sleep.
7003 left = rack->r_ctl.rc_timer_exp - cts;
7004 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left));
7005 rack_log_to_processing(rack, cts, ret, left);
7009 rack->rc_tmr_stopped = 0;
7010 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK;
7011 if (timers & PACE_TMR_DELACK) {
7012 ret = rack_timeout_delack(tp, rack, cts);
7013 } else if (timers & PACE_TMR_RACK) {
7014 rack->r_ctl.rc_tlp_rxt_last_time = cts;
7015 rack->r_fast_output = 0;
7016 ret = rack_timeout_rack(tp, rack, cts);
7017 } else if (timers & PACE_TMR_TLP) {
7018 rack->r_ctl.rc_tlp_rxt_last_time = cts;
7019 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp);
7020 } else if (timers & PACE_TMR_RXT) {
7021 rack->r_ctl.rc_tlp_rxt_last_time = cts;
7022 rack->r_fast_output = 0;
7023 ret = rack_timeout_rxt(tp, rack, cts);
7024 } else if (timers & PACE_TMR_PERSIT) {
7025 ret = rack_timeout_persist(tp, rack, cts);
7026 } else if (timers & PACE_TMR_KEEP) {
7027 ret = rack_timeout_keepalive(tp, rack, cts);
7029 rack_log_to_processing(rack, cts, ret, timers);
7034 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line)
7037 uint32_t us_cts, flags_on_entry;
7038 uint8_t hpts_removed = 0;
7040 flags_on_entry = rack->r_ctl.rc_hpts_flags;
7041 us_cts = tcp_get_usecs(&tv);
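/*
 * Pull the connection off the hpts if the time we scheduled for the
 * paced output (rc_last_output_to) has already passed, or if there is
 * nothing outstanding left to pace.
 */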
7042 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
7043 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) ||
7044 ((tp->snd_max - tp->snd_una) == 0))) {
7045 tcp_hpts_remove(rack->rc_inp);
7047 /* If we were not delayed cancel out the flag. */
7048 if ((tp->snd_max - tp->snd_una) == 0)
7049 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
7050 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
7052 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
7053 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
7054 if (tcp_in_hpts(rack->rc_inp) &&
7055 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) {
7057 * Canceling timers when we have no output being
7058 * paced. We also must remove ourselves from the hpts.
7061 tcp_hpts_remove(rack->rc_inp);
7064 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK);
7066 if (hpts_removed == 0)
7067 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
7071 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type)
7077 rack_stopall(struct tcpcb *tp)
7079 struct tcp_rack *rack;
7080 rack = (struct tcp_rack *)tp->t_fb_ptr;
7081 rack->t_timers_stopped = 1;
7086 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta)
7092 rack_timer_active(struct tcpcb *tp, uint32_t timer_type)
7098 rack_stop_all_timers(struct tcpcb *tp)
7100 struct tcp_rack *rack;
7103 * Ensure no timers are running.
7105 if (tcp_timer_active(tp, TT_PERSIST)) {
7106 /* We are entering while in persist; set the flag appropriately */
7107 rack = (struct tcp_rack *)tp->t_fb_ptr;
7108 rack->rc_in_persist = 1;
7110 tcp_timer_suspend(tp, TT_PERSIST);
7111 tcp_timer_suspend(tp, TT_REXMT);
7112 tcp_timer_suspend(tp, TT_KEEP);
7113 tcp_timer_suspend(tp, TT_DELACK);
7117 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
7118 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag)
7123 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
7125 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
7126 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
7127 rsm->r_flags |= RACK_OVERMAX;
7129 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) {
7130 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
7131 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
7133 idx = rsm->r_rtr_cnt - 1;
7134 rsm->r_tim_lastsent[idx] = ts;
7136 * Here we don't add in the length of the send, since it's already
7137 * reflected in snd_una <-> snd_max.
7139 rsm->r_fas = ctf_flight_size(rack->rc_tp,
7140 rack->r_ctl.rc_sacked);
7141 if (rsm->r_flags & RACK_ACKED) {
7142 /* Probably MTU discovery messing with us */
7143 rsm->r_flags &= ~RACK_ACKED;
7144 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
7146 if (rsm->r_in_tmap) {
7147 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7150 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7152 /* Take off the must-retransmit flag, if it's on */
7153 if (rsm->r_flags & RACK_MUST_RXT) {
7154 if (rack->r_must_retran)
7155 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
7156 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
7158 * We have retransmitted all we need. Clear
7159 * any must retransmit flags.
7161 rack->r_must_retran = 0;
7162 rack->r_ctl.rc_out_at_rto = 0;
7164 rsm->r_flags &= ~RACK_MUST_RXT;
7166 if (rsm->r_flags & RACK_SACK_PASSED) {
7167 /* We have retransmitted due to the SACK pass */
7168 rsm->r_flags &= ~RACK_SACK_PASSED;
7169 rsm->r_flags |= RACK_WAS_SACKPASS;
7174 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
7175 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag)
7178 * We (re-)transmitted starting at rsm->r_start for some length
7179 * (possibly less than r_end).
7181 struct rack_sendmap *nrsm;
7183 struct rack_sendmap *insret;
7189 c_end = rsm->r_start + len;
7190 if (SEQ_GEQ(c_end, rsm->r_end)) {
7192 * We retransmitted the whole piece or more than the whole
7193 * slopping into the next rsm.
7195 rack_update_rsm(tp, rack, rsm, ts, add_flag);
7196 if (c_end == rsm->r_end) {
7203 /* Hangs over the end; return what's left */
7203 act_len = rsm->r_end - rsm->r_start;
7204 *lenp = (len - act_len);
7205 return (rsm->r_end);
7207 /* We don't get out of this block. */
7210 * Here we retransmitted less than the whole thing which means we
7211 * have to split this into what was transmitted and what was not.
7213 nrsm = rack_alloc_full_limit(rack);
7216 * We can't get memory, so let's not proceed.
7222 * So here we are going to take the original rsm and make it what we
7223 * retransmitted. nrsm will be the tail portion we did not
7224 * retransmit. For example, say the chunk was 1, 11 (10 bytes) and
7225 * we retransmitted 5 bytes, i.e. 1, 5. The original piece shrinks to
7226 * 1, 6 and the new piece will be 6, 11.
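 * (The rack_clone_rsm() call below is what performs this split at
 * c_end.)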
7228 rack_clone_rsm(rack, nrsm, rsm, c_end);
7230 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
7232 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7234 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7235 if (insret != NULL) {
7236 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7237 nrsm, insret, rack, rsm);
7240 if (rsm->r_in_tmap) {
7241 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
7242 nrsm->r_in_tmap = 1;
7244 rsm->r_flags &= (~RACK_HAS_FIN);
7245 rack_update_rsm(tp, rack, rsm, ts, add_flag);
7246 /* Log a split of rsm into rsm and nrsm */
7247 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
7253 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
7254 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts,
7255 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, uint32_t s_moff, int hw_tls)
7257 struct tcp_rack *rack;
7258 struct rack_sendmap *rsm, *nrsm, fe;
7260 struct rack_sendmap *insret;
7262 register uint32_t snd_max, snd_una;
7265 * Add to the RACK log of packets in flight or retransmitted. If
7266 * there is a TS option we will use the TS echoed, if not we will
7269 * Retransmissions will increment the count and move the ts to its
7270 * proper place. Note that if options do not include TS's then we
7271 * won't be able to effectively use the ACK for an RTT on a retran.
7273 * Notes about r_start and r_end. Let's consider a send starting at
7274 * sequence 1 for 10 bytes. In such an example the r_start would be
7275 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
7276 * This means that r_end is actually the first sequence for the next
7281 * If err is set what do we do XXXrrs? Should we not add the thing?
7282 * -- i.e. return if err != 0 or should we pretend we sent it? --
7283 * i.e. proceed with the add ** do this for now.
7285 INP_WLOCK_ASSERT(tp->t_inpcb);
7288 * We don't log errors -- we could but snd_max does not
7289 * advance in this case either.
7293 if (th_flags & TH_RST) {
7295 * We don't log resets and we return immediately from
7300 rack = (struct tcp_rack *)tp->t_fb_ptr;
7301 snd_una = tp->snd_una;
7302 snd_max = tp->snd_max;
7303 if (th_flags & (TH_SYN | TH_FIN)) {
7305 * The call to rack_log_output is made before bumping
7306 * snd_max. This means we can record one extra byte on a SYN
7307 * or FIN if seq_out is adding more on and a FIN is present
7308 * (and we are not resending).
7310 if ((th_flags & TH_SYN) && (seq_out == tp->iss))
7312 if (th_flags & TH_FIN)
7314 if (SEQ_LT(snd_max, tp->snd_nxt)) {
7316 * The add/update has not been done for the FIN/SYN
7319 snd_max = tp->snd_nxt;
7322 if (SEQ_LEQ((seq_out + len), snd_una)) {
7323 /* Are we sending an old segment to induce an ack (keep-alive)? */
7326 if (SEQ_LT(seq_out, snd_una)) {
7327 /* huh? should we panic? */
7330 end = seq_out + len;
7332 if (SEQ_GEQ(end, seq_out))
7333 len = end - seq_out;
7338 /* We don't log zero window probes */
7341 if (IN_FASTRECOVERY(tp->t_flags)) {
7342 rack->r_ctl.rc_prr_out += len;
7344 /* First question is it a retransmission or new? */
7345 if (seq_out == snd_max) {
7348 rsm = rack_alloc(rack);
7351 * Hmm out of memory and the tcb got destroyed while
7356 if (th_flags & TH_FIN) {
7357 rsm->r_flags = RACK_HAS_FIN|add_flag;
7359 rsm->r_flags = add_flag;
7363 rsm->r_tim_lastsent[0] = cts;
7365 rsm->r_rtr_bytes = 0;
7366 if (th_flags & TH_SYN) {
7367 /* The data space is one beyond snd_una */
7368 rsm->r_flags |= RACK_HAS_SYN;
7370 rsm->r_start = seq_out;
7371 rsm->r_end = rsm->r_start + len;
7374 * save off the mbuf location that
7375 * sndmbuf_noadv returned (which is
7376 * where we started copying from).
7381 * Here we do add in the length of the send, since it's not yet
7382 * reflected in snd_una <-> snd_max.
7384 rsm->r_fas = (ctf_flight_size(rack->rc_tp,
7385 rack->r_ctl.rc_sacked) +
7386 (rsm->r_end - rsm->r_start));
7387 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */
7389 if (rsm->m->m_len <= rsm->soff) {
7391 * XXXrrs Question: will this happen?
7393 * If sbsndptr is set at the correct place
7394 * then s_moff should always be somewhere
7395 * within rsm->m. But if the sbsndptr was
7396 * off then that won't be true. If it occurs
7397 * we need to walk out to the correct location.
7402 while (lm->m_len <= rsm->soff) {
7403 rsm->soff -= lm->m_len;
7405 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u",
7406 __func__, rack, s_moff, s_mb, rsm->soff));
7410 rsm->orig_m_len = rsm->m->m_len;
7412 rsm->orig_m_len = 0;
7413 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
7415 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__);
7417 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7419 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7420 if (insret != NULL) {
7421 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7422 nrsm, insret, rack, rsm);
7425 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7428 * Special case detection: is there just a single
7429 * packet outstanding when we are not in recovery?
7431 * If this is true, mark it so.
7433 if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
7434 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) {
7435 struct rack_sendmap *prsm;
7437 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7439 prsm->r_one_out_nr = 1;
7444 * If we reach here it's a retransmission and we need to find it.
7446 memset(&fe, 0, sizeof(fe));
7448 if (hintrsm && (hintrsm->r_start == seq_out)) {
7452 /* No hints sorry */
7455 if ((rsm) && (rsm->r_start == seq_out)) {
7456 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag);
7463 /* Ok, it was not the last pointer; go through it the hard way. */
7465 fe.r_start = seq_out;
7466 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
7468 if (rsm->r_start == seq_out) {
7469 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag);
7476 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) {
7477 /* Transmitted within this piece */
7479 * Ok, we must split off the front and then let the
7480 * update do the rest.
7482 nrsm = rack_alloc_full_limit(rack);
7484 rack_update_rsm(tp, rack, rsm, cts, add_flag);
7488 * copy rsm to nrsm and then trim the front of rsm
7489 * to not include this part.
7491 rack_clone_rsm(rack, nrsm, rsm, seq_out);
7492 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
7494 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7496 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7497 if (insret != NULL) {
7498 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7499 nrsm, insret, rack, rsm);
7502 if (rsm->r_in_tmap) {
7503 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
7504 nrsm->r_in_tmap = 1;
7506 rsm->r_flags &= (~RACK_HAS_FIN);
7507 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag);
7515 * Hmm, not found in the map. Did they retransmit both old and on into the
7518 if (seq_out == tp->snd_max) {
7520 } else if (SEQ_LT(seq_out, tp->snd_max)) {
7522 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n",
7523 seq_out, len, tp->snd_una, tp->snd_max);
7524 printf("Starting Dump of all rack entries\n");
7525 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
7526 printf("rsm:%p start:%u end:%u\n",
7527 rsm, rsm->r_start, rsm->r_end);
7529 printf("Dump complete\n");
7530 panic("seq_out not found rack:%p tp:%p",
7536 * Hmm beyond sndmax? (only if we are using the new rtt-pack
7539 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p",
7540 seq_out, len, tp->snd_max, tp);
7546 * Record one of the RTT updates from an ack into
7547 * our sample structure.
7551 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt,
7552 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt)
7554 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7555 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
7556 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
7558 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7559 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
7560 rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
7562 if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
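/*
 * While a goodput measurement is in progress, also track the lowest
 * RTT and the largest receive window observed during the measurement.
 */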
7563 if (us_rtt < rack->r_ctl.rc_gp_lowrtt)
7564 rack->r_ctl.rc_gp_lowrtt = us_rtt;
7565 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd)
7566 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
7568 if ((confidence == 1) &&
7570 (rsm->r_just_ret) ||
7571 (rsm->r_one_out_nr &&
7572 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) {
7574 * If the rsm had a just-return
7575 * hit on it then we can't trust the
7576 * rtt measurement for buffer determination.
7577 * Note that a confidence of 2 indicates
7578 * SACK'd, which overrides the r_just_ret or
7579 * the r_one_out_nr. If it was a CUM-ACK and
7580 * we had only two outstanding, but get an
7581 * ack for only 1, then that also lowers our
7586 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7587 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) {
7588 if (rack->r_ctl.rack_rs.confidence == 0) {
7590 * We take anything with no current confidence
7593 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
7594 rack->r_ctl.rack_rs.confidence = confidence;
7595 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
7596 } else if (confidence || rack->r_ctl.rack_rs.confidence) {
7598 * Once we have a confident number,
7599 * we can update it with a smaller
7600 * value since this confident number
7601 * may include the DSACK time until
7602 * the next segment (the second one) arrived.
7604 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
7605 rack->r_ctl.rack_rs.confidence = confidence;
7606 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
7609 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
7610 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
7611 rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
7612 rack->r_ctl.rack_rs.rs_rtt_cnt++;
7616 * Collect new round-trip time estimate
7617 * and update averages and current timeout.
7620 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp)
7625 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY)
7626 /* No valid sample */
7628 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) {
7629 /* We are to use the lowest RTT seen in a single ack */
7630 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
7631 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) {
7632 /* We are to use the highest RTT seen in a single ack */
7633 rtt = rack->r_ctl.rack_rs.rs_rtt_highest;
7634 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) {
7635 /* We are to use the average RTT seen in a single ack */
7636 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot /
7637 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt);
7640 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method);
7646 if (rack->rc_gp_rtt_set == 0) {
7648 * With no RTT we have to accept
7649 * even one we are not confident of.
7651 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt;
7652 rack->rc_gp_rtt_set = 1;
7653 } else if (rack->r_ctl.rack_rs.confidence) {
7654 /* update the running gp srtt */
7655 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8);
7656 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8;
7658 if (rack->r_ctl.rack_rs.confidence) {
7660 * Record the low and high for the highly-buffered-path computation;
7661 * we only do this if we are confident (not a retransmission).
7663 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) {
7664 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
7666 if (rack->rc_highly_buffered == 0) {
7668 * Currently, once we declare a path as
7669 * highly buffered there is no going
7670 * back, which may be a problem...
7672 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) {
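/*
 * The ratio of the highest to the lowest RTT we have observed
 * exceeds the threshold; declare the path highly buffered.
 */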
7673 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt,
7674 rack->r_ctl.rc_highest_us_rtt,
7675 rack->r_ctl.rc_lowest_us_rtt,
7677 rack->rc_highly_buffered = 1;
7681 if ((rack->r_ctl.rack_rs.confidence) ||
7682 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) {
7684 * If we are highly confident of it <or> it was
7685 * never retransmitted, we accept it as the last us_rtt.
7687 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
7688 /* The lowest rtt can be set if it was not retransmitted */
7689 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) {
7690 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
7691 if (rack->r_ctl.rc_lowest_us_rtt == 0)
7692 rack->r_ctl.rc_lowest_us_rtt = 1;
7695 rack = (struct tcp_rack *)tp->t_fb_ptr;
7696 if (tp->t_srtt != 0) {
7698 * We keep a simple srtt in microseconds, like our rtt
7699 * measurement. We don't need to do any tricks with shifting
7700 * etc. Instead we just add in 1/8th of the new measurement
7701 * and subtract out 1/8 of the old srtt. We do the same with
7702 * the variance after finding the absolute value of the
7703 * difference between this sample and the current srtt.
7705 delta = tp->t_srtt - rtt;
7706 /* Take off 1/8th of the current sRTT */
7707 tp->t_srtt -= (tp->t_srtt >> 3);
7708 /* Add in 1/8th of the new RTT just measured */
7709 tp->t_srtt += (rtt >> 3);
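/* In other words: srtt_new = (7/8) * srtt_old + (1/8) * rtt. */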
7710 if (tp->t_srtt <= 0)
7712 /* Now let's take the absolute value of the variance */
7715 /* Subtract out 1/8th */
7716 tp->t_rttvar -= (tp->t_rttvar >> 3);
7717 /* Add in 1/8th of the new variance we just saw */
7718 tp->t_rttvar += (delta >> 3);
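/* Likewise: rttvar_new = (7/8) * rttvar_old + (1/8) * |srtt_old - rtt|. */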
7719 if (tp->t_rttvar <= 0)
7721 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
7722 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
7725 * No rtt measurement yet - use the unsmoothed rtt. Set the
7726 * variance to half the rtt (so our first retransmit happens
7730 tp->t_rttvar = rtt >> 1;
7731 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
7733 rack->rc_srtt_measure_made = 1;
7734 KMOD_TCPSTAT_INC(tcps_rttupdated);
7737 if (rack_stats_gets_ms_rtt == 0) {
7738 /* Send in the microsecond rtt used for rxt timeout purposes */
7739 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt));
7740 } else if (rack_stats_gets_ms_rtt == 1) {
7741 /* Send in the millisecond rtt used for rxt timeout purposes */
7745 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
7746 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
7747 } else if (rack_stats_gets_ms_rtt == 2) {
7748 /* Send in the millisecond rtt as close to the path RTT as we can get */
7752 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
7753 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
7755 /* Send in the microsecond rtt as close to the path RTT as we can get */
7756 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt));
7761 * the retransmit should happen at rtt + 4 * rttvar. Because of the
7762 * way we do the smoothing, srtt and rttvar will each average +1/2
7763 * tick of bias. When we compute the retransmit timer, we want 1/2
7764 * tick of rounding and 1 extra tick because of +-1/2 tick
7765 * uncertainty in the firing of the timer. The bias will give us
7766 * exactly the 1.5 tick we need. But, because the bias is
7767 * statistical, we have to test that we don't drop below the minimum
7768 * feasible timer (which is 2 ticks).
7771 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
7772 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop);
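/*
 * Per the comment above, RACK_REXMTVAL() yields the usual
 * srtt + 4 * rttvar style value; it is clamped between the minimum
 * (never below rtt + 2) and maximum RTO, with the timer slop applied.
 */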
7773 rack_log_rtt_sample(rack, rtt);
7774 tp->t_softerror = 0;
7779 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts)
7782 * Apply the inbound us-rtt at us_cts to the min filter.
7786 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
7787 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt,
7789 if (old_rtt > us_rtt) {
7790 /* We just hit a new lower rtt time */
7791 rack_log_rtt_shrinks(rack, us_cts, old_rtt,
7792 __LINE__, RACK_RTTS_NEWRTT);
7794 * Only count it if it's lower than what we saw within our
7797 if ((old_rtt - us_rtt) > rack_min_rtt_movement) {
7798 if (rack_probertt_lower_within &&
7799 rack->rc_gp_dyn_mul &&
7800 (rack->use_fixed_rate == 0) &&
7801 (rack->rc_always_pace)) {
7803 * We are seeing a new lower rtt very close
7804 * to the time that we would have entered probe-rtt.
7805 * This is probably due to the fact that a peer flow
7806 * has entered probe-rtt. Let's go in now too.
7810 val = rack_probertt_lower_within * rack_time_between_probertt;
7812 if ((rack->in_probe_rtt == 0) &&
7813 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) {
7814 rack_enter_probertt(rack, us_cts);
7817 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
7823 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
7824 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack)
7828 uint32_t t, len_acked;
7830 if ((rsm->r_flags & RACK_ACKED) ||
7831 (rsm->r_flags & RACK_WAS_ACKED))
7834 if (rsm->r_no_rtt_allowed) {
7838 if (ack_type == CUM_ACKED) {
7839 if (SEQ_GT(th_ack, rsm->r_end)) {
7840 len_acked = rsm->r_end - rsm->r_start;
7843 len_acked = th_ack - rsm->r_start;
7847 len_acked = rsm->r_end - rsm->r_start;
7850 if (rsm->r_rtr_cnt == 1) {
7852 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
7855 if (!tp->t_rttlow || tp->t_rttlow > t)
7857 if (!rack->r_ctl.rc_rack_min_rtt ||
7858 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7859 rack->r_ctl.rc_rack_min_rtt = t;
7860 if (rack->r_ctl.rc_rack_min_rtt == 0) {
7861 rack->r_ctl.rc_rack_min_rtt = 1;
7864 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]))
7865 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
7867 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
7870 if (CC_ALGO(tp)->rttsample != NULL) {
7871 /* Kick the RTT to the CC */
7872 CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas);
7874 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
7875 if (ack_type == SACKED) {
7876 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1);
7877 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt);
7880 * We need to set up what our confidence
7883 * If the rsm was app limited and it is
7884 * less than a mss in length (the end
7885 * of the send) then we have a gap. If we
7886 * were app limited but say we were sending
7887 * multiple MSS's then we are more confident
7890 * When we are not app-limited then we see if
7891 * the rsm is being included in the current
7892 * measurement; we tell this by the app_limited_needs_set
7895 * Note that being cwnd blocked is not app-limited;
7896 * also, the pacing delay between packets when we
7897 * are sending only 1 or 2 MSS's will show up
7898 * in the RTT. We probably need to examine this algorithm
7899 * a bit more and enhance it to account for the delay
7900 * between rsm's. We could do that by saving off the
7901 * pacing delay of each rsm (in an rsm) and then
7902 * factoring that in somehow though for now I am
7907 if (rsm->r_flags & RACK_APP_LIMITED) {
7908 if (all && (len_acked <= ctf_fixed_maxseg(tp)))
7912 } else if (rack->app_limited_needs_set == 0) {
7917 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2);
7918 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt,
7919 calc_conf, rsm, rsm->r_rtr_cnt);
7921 if ((rsm->r_flags & RACK_TLP) &&
7922 (!IN_FASTRECOVERY(tp->t_flags))) {
7923 /* Segment was a TLP and our retrans matched */
7924 if (rack->r_ctl.rc_tlp_cwnd_reduce) {
7925 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
7928 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
7929 /* New more recent rack_tmit_time */
7930 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
7931 rack->rc_rack_rtt = t;
7936 * We clear the soft/rxtshift since we got an ack.
7937 * There is no assurance we will call the commit() function
7938 * so we need to clear these to avoid incorrect handling.
7941 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
7942 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
7943 tp->t_softerror = 0;
7944 if (to && (to->to_flags & TOF_TS) &&
7945 (ack_type == CUM_ACKED) &&
7947 ((rsm->r_flags & RACK_OVERMAX) == 0)) {
7949 * Now which timestamp does it match? In this block the ACK
7950 * must be coming from a previous transmission.
7952 for (i = 0; i < rsm->r_rtr_cnt; i++) {
7953 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) {
7954 t = cts - (uint32_t)rsm->r_tim_lastsent[i];
7957 if (CC_ALGO(tp)->rttsample != NULL) {
7959 * Kick the RTT to the CC, here
7960 * we lie a bit in that we know the
7961 * retransmission is correct even though
7962 * we retransmitted. This is because
7963 * we match the timestamps.
7965 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i]))
7966 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i];
7968 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i];
7969 CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas);
7971 if ((i + 1) < rsm->r_rtr_cnt) {
7973 * The peer ack'd from our previous
7974 * transmission. We have a spurious
7975 * retransmission and thus we don't
7976 * want to update our rack_rtt.
7978 * Hmm should there be a CC revert here?
7983 if (!tp->t_rttlow || tp->t_rttlow > t)
7985 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7986 rack->r_ctl.rc_rack_min_rtt = t;
7987 if (rack->r_ctl.rc_rack_min_rtt == 0) {
7988 rack->r_ctl.rc_rack_min_rtt = 1;
7991 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
7992 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
7993 /* New more recent rack_tmit_time */
7994 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
7995 rack->rc_rack_rtt = t;
7997 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3);
7998 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm,
8006 * Ok, it's a SACK block that we retransmitted, or a Windows
8007 * machine without timestamps. We can tell nothing from the
8008 * time-stamp, since either it's not there or it is the time the peer last
8009 * received a segment that moved forward its cum-ack point.
8012 i = rsm->r_rtr_cnt - 1;
8013 t = cts - (uint32_t)rsm->r_tim_lastsent[i];
8016 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
8018 * We retransmitted and the ack came back in less
8019 * than the smallest rtt we have observed. We most
8020 * likely did an improper retransmit as outlined in
8021 * 6.2 Step 2 point 2 in the rack-draft so we
8022 * don't want to update our rack_rtt. We in
8023 * theory (in future) might want to think about reverting our
8024 * cwnd state but we won't for now.
8027 } else if (rack->r_ctl.rc_rack_min_rtt) {
8029 * We retransmitted it and the retransmit did the
8032 if (!rack->r_ctl.rc_rack_min_rtt ||
8033 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
8034 rack->r_ctl.rc_rack_min_rtt = t;
8035 if (rack->r_ctl.rc_rack_min_rtt == 0) {
8036 rack->r_ctl.rc_rack_min_rtt = 1;
8039 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[i])) {
8040 /* New more recent rack_tmit_time */
8041 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i];
8042 rack->rc_rack_rtt = t;
8051 * Mark the SACK_PASSED flag on all entries prior to rsm, send-wise.
8054 rack_log_sack_passed(struct tcpcb *tp,
8055 struct tcp_rack *rack, struct rack_sendmap *rsm)
8057 struct rack_sendmap *nrsm;
8060 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap,
8061 rack_head, r_tnext) {
8063 /* Skip the original segment; it is acked */
8066 if (nrsm->r_flags & RACK_ACKED) {
8068 * Skip ack'd segments, though we
8069 * should not see these, since tmap
8070 * should not have ack'd segments.
8074 if (nrsm->r_flags & RACK_RWND_COLLAPSED) {
8076 * If the peer dropped the rwnd on
8077 * these then we don't worry about them.
8081 if (nrsm->r_flags & RACK_SACK_PASSED) {
8083 * We found one that is already marked
8084 * passed; we have been here before and
8085 * so all others below this are marked.
8089 nrsm->r_flags |= RACK_SACK_PASSED;
8090 nrsm->r_flags &= ~RACK_WAS_SACKPASS;
8095 rack_need_set_test(struct tcpcb *tp,
8096 struct tcp_rack *rack,
8097 struct rack_sendmap *rsm,
8103 if ((tp->t_flags & TF_GPUTINPROG) &&
8104 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
8106 * We were app limited, and this ack
8107 * butts up or goes beyond the point where we want
8108 * to start our next measurement. We need
8109 * to record the new gput_ts here and
8110 * possibly update the start sequence.
8114 if (rsm->r_rtr_cnt > 1) {
8116 * This is a retransmit; can we
8117 * really make any assessment at this
8118 * point? We are not really sure of
8119 * the timestamp: is it this or the
8120 * previous transmission?
8122 * Let's wait for something better that
8123 * is not retransmitted.
8129 rack->app_limited_needs_set = 0;
8130 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
8131 /* Do we start at a new end? */
8132 if ((use_which == RACK_USE_BEG) &&
8133 SEQ_GEQ(rsm->r_start, tp->gput_seq)) {
8135 * When we get an ACK that just eats
8136 * up some of the rsm, we set RACK_USE_BEG
8137 * since what's at r_start (i.e. th_ack)
8138 * is left unacked and that's where the
8139 * measurement now starts.
8141 tp->gput_seq = rsm->r_start;
8142 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8144 if ((use_which == RACK_USE_END) &&
8145 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
8147 * We use the end when the cumack
8148 * is moving forward and completely
8149 * deleting the rsm passed so basically
8150 * r_end holds th_ack.
8152 * For SACK's we also want to use the end
8153 * since this piece just got sacked and
8154 * we want to target anything after that
8155 * in our measurement.
8157 tp->gput_seq = rsm->r_end;
8158 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8160 if (use_which == RACK_USE_END_OR_THACK) {
8162 * Special case for the ack moving forward;
8163 * not a sack, so we need to move all the
8164 * way up to where this ack's cum-ack moves
8167 if (SEQ_GT(th_ack, rsm->r_end))
8168 tp->gput_seq = th_ack;
8170 tp->gput_seq = rsm->r_end;
8171 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8173 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) {
8175 * We moved beyond this guy's range, re-calculate
8176 * the new end point.
8178 if (rack->rc_gp_filled == 0) {
8179 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
8181 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
8185 * We are moving the goal post; we may be able to clear the
8186 * measure_saw_probe_rtt flag.
8188 if ((rack->in_probe_rtt == 0) &&
8189 (rack->measure_saw_probe_rtt) &&
8190 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
8191 rack->measure_saw_probe_rtt = 0;
8192 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts,
8193 seq, tp->gput_seq, 0, 5, line, NULL, 0);
8194 if (rack->rc_gp_filled &&
8195 ((tp->gput_ack - tp->gput_seq) <
8196 max(rc_init_window(rack), (MIN_GP_WIN *
8197 ctf_fixed_maxseg(tp))))) {
8198 uint32_t ideal_amount;
8200 ideal_amount = rack_get_measure_window(tp, rack);
8201 if (ideal_amount > sbavail(&tp->t_inpcb->inp_socket->so_snd)) {
8203 * There is no sense in continuing this measurement
8204 * because it's too small to gain us anything we
8205 * trust. Skip it; that way we can start a new
8206 * measurement quicker.
8208 tp->t_flags &= ~TF_GPUTINPROG;
8209 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq,
8210 0, 0, 0, 6, __LINE__, NULL, 0);
8213 * Reset the window further out.
8215 tp->gput_ack = tp->gput_seq + ideal_amount;
8222 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm)
8224 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) {
8225 /* Behind our TLP definition or right at */
8228 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) {
8229 /* The start is beyond or right at our end of TLP definition */
8232 /* It has to be a sub-part of the original TLP recorded */
8238 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack,
8239 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two)
8241 uint32_t start, end, changed = 0;
8242 struct rack_sendmap stack_map;
8243 struct rack_sendmap *rsm, *nrsm, fe, *prev, *next;
8245 struct rack_sendmap *insret;
8247 int32_t used_ref = 1;
8250 start = sack->start;
8253 memset(&fe, 0, sizeof(fe));
8255 if ((rsm == NULL) ||
8256 (SEQ_LT(end, rsm->r_start)) ||
8257 (SEQ_GEQ(start, rsm->r_end)) ||
8258 (SEQ_LT(start, rsm->r_start))) {
8260 * We are not in the right spot,
8261 * find the correct spot in the tree.
8265 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
8272 /* Ok we have an ACK for some piece of this rsm */
8273 if (rsm->r_start != start) {
8274 if ((rsm->r_flags & RACK_ACKED) == 0) {
8276 * Before any splitting or hookery is
8277 * done, is it a TLP of interest, i.e. a rxt?
8279 if ((rsm->r_flags & RACK_TLP) &&
8280 (rsm->r_rtr_cnt > 1)) {
8282 * We are splitting a rxt TLP, check
8283 * if we need to save off the start/end
8285 if (rack->rc_last_tlp_acked_set &&
8286 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
8288 * We already turned this on since we are inside
8289 * the previous one; it was a partial sack, and now we
8290 * are getting another one (maybe all of it).
8293 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
8295 * Let's make sure we have all of it though.
8297 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
8298 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8299 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8300 rack->r_ctl.last_tlp_acked_end);
8302 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
8303 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8304 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8305 rack->r_ctl.last_tlp_acked_end);
8308 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8309 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8310 rack->rc_last_tlp_past_cumack = 0;
8311 rack->rc_last_tlp_acked_set = 1;
8312 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
8316 * Need to split this into two pieces: the before and after.
8317 * The before remains in the map, the after must be
8318 * added. In other words we have:
8319 * rsm |--------------|
8323 * and nrsm will be the sacked piece
8326 * But before we start down that path let's
8327 * see if the sack spans over on top of
8328 * the next guy and it is already sacked.
8331 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8332 if (next && (next->r_flags & RACK_ACKED) &&
8333 SEQ_GEQ(end, next->r_start)) {
8335 * So the next one is already acked, and
8336 * we can thus by hookery use our stack_map
8337 * to reflect the piece being sacked and
8338 * then adjust the two tree entries moving
8339 * the start and ends around. So we start like:
8340 * rsm |------------| (not-acked)
8341 * next |-----------| (acked)
8342 * sackblk |-------->
8343 * We want to end like so:
8344 * rsm |------| (not-acked)
8345 * next |-----------------| (acked)
8347 * Where nrsm is a temporary stack piece we
8348 * use to update all the gizmos.
8350 /* Copy up our fudge block */
8352 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
8353 /* Now adjust our tree blocks */
8355 next->r_start = start;
8356 /* Now we must adjust back where next->m is */
8357 rack_setup_offset_for_rsm(rsm, next);
8359 /* We don't need to adjust rsm, it did not change */
8360 /* Clear out the dup ack count of the remainder */
8362 rsm->r_just_ret = 0;
8363 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8364 /* Now let's make sure our fudge block is right */
8365 nrsm->r_start = start;
8366 /* Now let's update all the stats and such */
8367 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
8368 if (rack->app_limited_needs_set)
8369 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
8370 changed += (nrsm->r_end - nrsm->r_start);
8371 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
8372 if (nrsm->r_flags & RACK_SACK_PASSED) {
8373 rack->r_ctl.rc_reorder_ts = cts;
8376 * Now we want to go up from rsm (the
8377 * one left un-acked) to the next one
8378 * in the tmap. We do this so when
8379 * we walk backwards we include marking
8380 * sack-passed on rsm (The one passed in
8381 * is skipped since it is generally called
8382 * on something sacked before removing it
8385 if (rsm->r_in_tmap) {
8386 nrsm = TAILQ_NEXT(rsm, r_tnext);
8388 * Now that we have the next
8389 * one walk backwards from there.
8391 if (nrsm && nrsm->r_in_tmap)
8392 rack_log_sack_passed(tp, rack, nrsm);
8394 /* Now are we done? */
8395 if (SEQ_LT(end, next->r_end) ||
8396 (end == next->r_end)) {
8397 /* Done with block */
8400 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__);
8401 counter_u64_add(rack_sack_used_next_merge, 1);
8402 /* Position for the next block */
8403 start = next->r_end;
8404 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next);
8409 * We can't use any hookery here, so we
8410 * need to split the map. We enter like
8414 * We will add the new block nrsm and
8415 * that will be the new portion, and then
8416 * fall through after resetting rsm. So we
8417 * split and look like this:
8421 * We then fall through resetting
8422 * rsm to nrsm, so the next block
8425 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
8428 * failed XXXrrs what can we do but lose the sack
8433 counter_u64_add(rack_sack_splits, 1);
8434 rack_clone_rsm(rack, nrsm, rsm, start);
8435 rsm->r_just_ret = 0;
8437 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
8439 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
8440 if (insret != NULL) {
8441 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
8442 nrsm, insret, rack, rsm);
8445 if (rsm->r_in_tmap) {
8446 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8447 nrsm->r_in_tmap = 1;
8449 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__);
8450 rsm->r_flags &= (~RACK_HAS_FIN);
8451 /* Position us to point to the new nrsm that starts the sack blk */
8455 /* Already sacked this piece */
8456 counter_u64_add(rack_sack_skipped_acked, 1);
8458 if (end == rsm->r_end) {
8459 /* Done with block */
8460 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8462 } else if (SEQ_LT(end, rsm->r_end)) {
8463 /* A partial sack to an already sacked block */
8465 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8469 * The end goes beyond this guy;
8470 * reposition the start to the
8474 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8480 if (SEQ_GEQ(end, rsm->r_end)) {
8482 * The end of this block is either beyond this guy or right
8483 * at this guy. I.e.:
8489 if ((rsm->r_flags & RACK_ACKED) == 0) {
8491 * Is it a TLP of interest?
8493 if ((rsm->r_flags & RACK_TLP) &&
8494 (rsm->r_rtr_cnt > 1)) {
8496 * We are splitting a rxt TLP, check
8497 * if we need to save off the start/end
8499 if (rack->rc_last_tlp_acked_set &&
8500 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
8502 * We already turned this on since we are inside
8503 * the previous one; it was a partial sack, and now we
8504 * are getting another one (maybe all of it).
8506 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
8508 * Let's make sure we have all of it though.
8510 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
8511 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8512 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8513 rack->r_ctl.last_tlp_acked_end);
8515 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
8516 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8517 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8518 rack->r_ctl.last_tlp_acked_end);
8521 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8522 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8523 rack->rc_last_tlp_past_cumack = 0;
8524 rack->rc_last_tlp_acked_set = 1;
8525 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
8528 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
8529 changed += (rsm->r_end - rsm->r_start);
8530 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
8531 if (rsm->r_in_tmap) /* should be true */
8532 rack_log_sack_passed(tp, rack, rsm);
8533 /* Is reordering occurring? */
8534 if (rsm->r_flags & RACK_SACK_PASSED) {
8535 rsm->r_flags &= ~RACK_SACK_PASSED;
8536 rack->r_ctl.rc_reorder_ts = cts;
8538 if (rack->app_limited_needs_set)
8539 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
8540 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
8541 rsm->r_flags |= RACK_ACKED;
8542 if (rsm->r_in_tmap) {
8543 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8546 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__);
8548 counter_u64_add(rack_sack_skipped_acked, 1);
8551 if (end == rsm->r_end) {
8552 /* This block only - done, setup for next */
8556 * There is more not covered by this rsm; move on
8557 * to the next block in the RB tree.
8559 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8567 * The end of this sack block is smaller than
8572 if ((rsm->r_flags & RACK_ACKED) == 0) {
8574 * Is it a TLP of interest?
8576 if ((rsm->r_flags & RACK_TLP) &&
8577 (rsm->r_rtr_cnt > 1)) {
8579 * We are splitting a rxt TLP, check
8580 * if we need to save off the start/end
8582 if (rack->rc_last_tlp_acked_set &&
8583 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
8585 * We already turned this on since we are inside
8586 * the previous one; it was a partial sack, and now we
8587 * are getting another one (maybe all of it).
8589 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
8591 * Let's make sure we have all of it though.
8593 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
8594 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8595 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8596 rack->r_ctl.last_tlp_acked_end);
8598 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
8599 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8600 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8601 rack->r_ctl.last_tlp_acked_end);
8604 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8605 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8606 rack->rc_last_tlp_past_cumack = 0;
8607 rack->rc_last_tlp_acked_set = 1;
8608 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
8611 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8613 (prev->r_flags & RACK_ACKED)) {
8615 * Goal, we want the right remainder of rsm to shrink
8616 * in place and span from (rsm->r_start = end) to rsm->r_end.
8617 * We want to expand prev to go all the way
8618 * to prev->r_end <- end.
8619 * so in the tree we have before:
8620 * prev |--------| (acked)
8621 * rsm |-------| (non-acked)
8623 * We churn it so we end up with
8624 * prev |----------| (acked)
8625 * rsm |-----| (non-acked)
8626 * nrsm |-| (temporary)
8628 * Note if either prev/rsm is a TLP we don't
8632 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
8635 /* Now adjust nrsm (stack copy) to be
8636 * the one that is the small
8637 * piece that was "sacked".
8641 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8643 * Now that the rsm has had its start moved forward
8644 * let's go ahead and get its new place in the world.
8646 rack_setup_offset_for_rsm(prev, rsm);
8648 * Now nrsm is our new little piece
8649 * that is acked (which was merged
8650 * to prev). Update the rtt and changed
8651 * based on that. Also check for reordering.
8653 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
8654 if (rack->app_limited_needs_set)
8655 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
8656 changed += (nrsm->r_end - nrsm->r_start);
8657 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
8658 if (nrsm->r_flags & RACK_SACK_PASSED) {
8659 rack->r_ctl.rc_reorder_ts = cts;
8661 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__);
8663 counter_u64_add(rack_sack_used_prev_merge, 1);
8666 * This is the case where our previous
8667 * block is not acked either, so we must
8668 * split the block in two.
8670 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
8672 /* failed rrs what can we do but lose the sack info? */
8675 if ((rsm->r_flags & RACK_TLP) &&
8676 (rsm->r_rtr_cnt > 1)) {
8678 * We are splitting a rxt TLP, check
8679 * if we need to save off the start/end
8681 if (rack->rc_last_tlp_acked_set &&
8682 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
8684 * We already turned this on since this block is inside
8685 * the previous one; it was a partial sack, and now we
8686 * are getting another one (maybe all of it).
8688 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
8690 * Let's make sure we have all of it though.
8692 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
8693 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8694 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8695 rack->r_ctl.last_tlp_acked_end);
8697 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
8698 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8699 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8700 rack->r_ctl.last_tlp_acked_end);
8703 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8704 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8705 rack->rc_last_tlp_acked_set = 1;
8706 rack->rc_last_tlp_past_cumack = 0;
8707 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
8711 * In this case nrsm becomes
8712 * nrsm->r_start = end;
8713 * nrsm->r_end = rsm->r_end;
8714 * which is un-acked.
8716 * rsm->r_end = nrsm->r_start;
8717 * i.e. the remaining un-acked
8718 * piece is left on the left
8721 * So we start like this
8722 * rsm |----------| (not acked)
8724 * build it so we have
8726 * nrsm |------| (not acked)
8728 counter_u64_add(rack_sack_splits, 1);
8729 rack_clone_rsm(rack, nrsm, rsm, end);
8730 rsm->r_flags &= (~RACK_HAS_FIN);
8731 rsm->r_just_ret = 0;
8733 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
8735 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
8736 if (insret != NULL) {
8737 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
8738 nrsm, insret, rack, rsm);
8741 if (rsm->r_in_tmap) {
8742 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8743 nrsm->r_in_tmap = 1;
8746 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
8747 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
8748 changed += (rsm->r_end - rsm->r_start);
8749 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
8750 if (rsm->r_in_tmap) /* should be true */
8751 rack_log_sack_passed(tp, rack, rsm);
8752 /* Is reordering occurring? */
8753 if (rsm->r_flags & RACK_SACK_PASSED) {
8754 rsm->r_flags &= ~RACK_SACK_PASSED;
8755 rack->r_ctl.rc_reorder_ts = cts;
8757 if (rack->app_limited_needs_set)
8758 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
8759 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
8760 rsm->r_flags |= RACK_ACKED;
8761 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__);
8762 if (rsm->r_in_tmap) {
8763 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8767 } else if (start != end){
8769 * The block was already acked.
8771 counter_u64_add(rack_sack_skipped_acked, 1);
8776 ((rsm->r_flags & RACK_TLP) == 0) &&
8777 (rsm->r_flags & RACK_ACKED)) {
8779 * Now can we merge where we worked
8780 * with either the previous or
8783 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8785 if (next->r_flags & RACK_TLP)
8787 if (next->r_flags & RACK_ACKED) {
8788 /* yep this and next can be merged */
8789 rsm = rack_merge_rsm(rack, rsm, next);
8790 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8794 /* Now what about the previous? */
8795 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8797 if (prev->r_flags & RACK_TLP)
8799 if (prev->r_flags & RACK_ACKED) {
8800 /* yep the previous and this can be merged */
8801 rsm = rack_merge_rsm(rack, prev, rsm);
8802 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8807 if (used_ref == 0) {
8808 counter_u64_add(rack_sack_proc_all, 1);
8810 counter_u64_add(rack_sack_proc_short, 1);
8812 /* Save off the next one for quick reference. */
8814 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8817 *prsm = rack->r_ctl.rc_sacklast = nrsm;
8818 /* Pass back the moved. */
8824 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack)
8826 struct rack_sendmap *tmap;
8829 while (rsm && (rsm->r_flags & RACK_ACKED)) {
8830 /* It's no longer sacked, mark it so */
8831 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
8833 if (rsm->r_in_tmap) {
8834 panic("rack:%p rsm:%p flags:0x%x in tmap?",
8835 rack, rsm, rsm->r_flags);
8838 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS);
8839 /* Rebuild it into our tmap */
8841 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8844 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext);
8847 tmap->r_in_tmap = 1;
8848 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8851 * Now let's possibly clear the sack filter so we start
8852 * recognizing sacks that cover this area.
8854 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack);
8859 rack_do_decay(struct tcp_rack *rack)
8863 #define timersub(tvp, uvp, vvp) \
8865 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
8866 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
8867 if ((vvp)->tv_usec < 0) { \
8869 (vvp)->tv_usec += 1000000; \
8873 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res);
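/* res now holds the elapsed time since the last decay pass. */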
8876 rack->r_ctl.input_pkt++;
8877 if ((rack->rc_in_persist) ||
8878 (res.tv_sec >= 1) ||
8879 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) {
8881 * Check for decay of non-SAD;
8882 * we want all SAD detection metrics to
8883 * decay by 1/4 for each second (or more) that has passed.
8885 #ifdef NETFLIX_EXP_DETECTION
8888 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt;
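/* Input packets seen since the last decay pass. */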
8890 /* Update our saved tracking values */
8891 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt;
8892 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
8893 /* Now do we escape without decay? */
8894 #ifdef NETFLIX_EXP_DETECTION
8895 if (rack->rc_in_persist ||
8896 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) ||
8897 (pkt_delta < tcp_sad_low_pps)){
8899 * We don't decay idle connections
8900 * or ones that have a low input pps.
8904 /* Decay the counters */
8905 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count,
8907 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count,
8909 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra,
8911 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move,
8918 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to)
8920 struct rack_sendmap *rsm;
8922 struct rack_sendmap *rm;
8926 * The ACK point is advancing to th_ack; we must drop off
8927 * the packets in the rack log and calculate any eligible
8930 rack->r_wanted_output = 1;
8932 /* Tend any TLP that has been marked for 1/2 the seq space (it's old) */
8933 if ((rack->rc_last_tlp_acked_set == 1)&&
8934 (rack->rc_last_tlp_past_cumack == 1) &&
8935 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) {
8937 * We have reached the point where our last rack
8938 * tlp retransmit sequence is ahead of the cum-ack.
8939 * This can only happen when the cum-ack moves all
8940 * the way around (it's been a full 2^31+1 bytes
8941 * or more since we sent a retransmitted TLP). Let's
8942 * turn off the valid flag since it's not really valid.
8944 * Note that since sack's also turn on this event we have
8945 * a complication: we have to wait to age it out until
8946 * the cum-ack is past the TLP before checking, which is
8947 * what the next else clause does.
8949 rack_log_dsack_event(rack, 9, __LINE__,
8950 rack->r_ctl.last_tlp_acked_start,
8951 rack->r_ctl.last_tlp_acked_end);
8952 rack->rc_last_tlp_acked_set = 0;
8953 rack->rc_last_tlp_past_cumack = 0;
8954 } else if ((rack->rc_last_tlp_acked_set == 1) &&
8955 (rack->rc_last_tlp_past_cumack == 0) &&
8956 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) {
8958 * It is safe to start aging TLP's out.
8960 rack->rc_last_tlp_past_cumack = 1;
8962 /* We do the same for the tlp send seq as well */
8963 if ((rack->rc_last_sent_tlp_seq_valid == 1) &&
8964 (rack->rc_last_sent_tlp_past_cumack == 1) &&
8965 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) {
8966 rack_log_dsack_event(rack, 9, __LINE__,
8967 rack->r_ctl.last_sent_tlp_seq,
8968 (rack->r_ctl.last_sent_tlp_seq +
8969 rack->r_ctl.last_sent_tlp_len));
8970 rack->rc_last_sent_tlp_seq_valid = 0;
8971 rack->rc_last_sent_tlp_past_cumack = 0;
8972 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) &&
8973 (rack->rc_last_sent_tlp_past_cumack == 0) &&
8974 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) {
8976 * It is safe to start aging the TLP send seq out.
8978 rack->rc_last_sent_tlp_past_cumack = 1;
8981 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
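/* Start from the oldest (lowest sequence) entry in the send map. */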
8983 if ((th_ack - 1) == tp->iss) {
8985 * For the SYN incoming case we will not
8986 * have called tcp_output for the sending of
8987 * the SYN, so there will be no map. All
8988 * other cases should probably be a panic.
8992 if (tp->t_flags & TF_SENTFIN) {
8993 /* if we sent a FIN we often will not have a map */
8997 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n",
8999 tp->t_state, th_ack, rack,
9000 tp->snd_una, tp->snd_max, tp->snd_nxt);
9004 if (SEQ_LT(th_ack, rsm->r_start)) {
9005 /* Huh map is missing this */
9007 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n",
9009 th_ack, tp->t_state, rack->r_state);
9013 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack);
9015 /* Now was it a retransmitted TLP? */
9016 if ((rsm->r_flags & RACK_TLP) &&
9017 (rsm->r_rtr_cnt > 1)) {
9019 * Yes, this rsm was a TLP and retransmitted; remember that,
9020 * since if a DSACK comes back on this we don't want
9021 * to think of it as a reordered segment. This may
9022 * get updated again with possibly even other TLPs
9023 * in flight, but that's ok. Only when we don't send
9024 * a retransmitted TLP for 1/2 the sequence space
9025 * will it get turned off (above).
9027 if (rack->rc_last_tlp_acked_set &&
9028 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
9030 * We already turned this on since the end matches;
9031 * the previous one was a partial ack, and now we
9032 * are getting another one (maybe all of it).
9034 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
9036 * Let's make sure we have all of it though.
9038 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
9039 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9040 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9041 rack->r_ctl.last_tlp_acked_end);
9043 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
9044 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9045 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9046 rack->r_ctl.last_tlp_acked_end);
9049 rack->rc_last_tlp_past_cumack = 1;
9050 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9051 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9052 rack->rc_last_tlp_acked_set = 1;
9053 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
9056 /* Now do we consume the whole thing? */
9057 if (SEQ_GEQ(th_ack, rsm->r_end)) {
9058 /* Its all consumed. */
9060 uint8_t newly_acked;
9062 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__);
9063 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
9064 rsm->r_rtr_bytes = 0;
9065 /* Record the time of highest cumack sent */
9066 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
9068 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
9070 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
9072 panic("removing head in rack:%p rsm:%p rm:%p",
9076 if (rsm->r_in_tmap) {
9077 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
9081 if (rsm->r_flags & RACK_ACKED) {
9083 * It was acked on the scoreboard -- remove
9086 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
9088 } else if (rsm->r_flags & RACK_SACK_PASSED) {
9090 * There are segments ACKED on the
9091 * scoreboard further up. We are seeing
9094 rsm->r_flags &= ~RACK_SACK_PASSED;
9095 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
9096 rsm->r_flags |= RACK_ACKED;
9097 rack->r_ctl.rc_reorder_ts = cts;
9098 if (rack->r_ent_rec_ns) {
9100 * We have sent no more, and we saw a SACK
9103 rack->r_might_revert = 1;
9106 if ((rsm->r_flags & RACK_TO_REXT) &&
9107 (tp->t_flags & TF_RCVD_TSTMP) &&
9108 (to->to_flags & TOF_TS) &&
9109 (to->to_tsecr != 0) &&
9110 (tp->t_flags & TF_PREVVALID)) {
9112 * We can use the timestamp to see
9113 * if this retransmission was from the
9114 * first transmit. If so we made a mistake.
9116 tp->t_flags &= ~TF_PREVVALID;
9117 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) {
9118 /* The first transmit is what this ack is for */
9119 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__);
9122 left = th_ack - rsm->r_end;
9123 if (rack->app_limited_needs_set && newly_acked)
9124 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK);
9125 /* Free back to zone */
9126 rack_free(rack, rsm);
9130 /* Check for reneging */
9131 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
9132 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) {
9134 * The peer has moved snd_una up to
9135 * the edge of this send, i.e. one
9136 * that it had previously acked. The only
9137 * way that can be true is if the peer threw
9138 * away data (space issues) that it had
9139 * previously sacked (else it would have
9140 * given us snd_una up to rsm->r_end).
9141 * We need to undo the acked markings here.
9143 * Note we have to look to make sure th_ack is
9144 * our rsm->r_start in case we get an old ack
9145 * where th_ack is behind snd_una.
9147 rack_peer_reneges(rack, rsm, th_ack);
9151 if (rsm->r_flags & RACK_ACKED) {
9153 * It was acked on the scoreboard -- remove it from
9154 * total for the part being cum-acked.
9156 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
9159 * Clear the dup ack count for
9160 * the piece that remains.
9163 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
9164 if (rsm->r_rtr_bytes) {
9166 * It was retransmitted; adjust the
9167 * sack holes for what was acked.
9171 ack_am = (th_ack - rsm->r_start);
9172 if (ack_am >= rsm->r_rtr_bytes) {
9173 rack->r_ctl.rc_holes_rxt -= ack_am;
9174 rsm->r_rtr_bytes -= ack_am;
9178 * Update where the piece starts and record
9179 * the time of send of highest cumack sent.
9181 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
9182 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__);
9183 /* Now we need to move our offset forward too */
9184 if (rsm->m && (rsm->orig_m_len != rsm->m->m_len)) {
9185 /* Fix up the orig_m_len and possibly the mbuf offset */
9186 rack_adjust_orig_mlen(rsm);
9188 rsm->soff += (th_ack - rsm->r_start);
9189 rsm->r_start = th_ack;
9190 /* Now do we need to move the mbuf fwd too? */
9192 while (rsm->soff >= rsm->m->m_len) {
9193 rsm->soff -= rsm->m->m_len;
9194 rsm->m = rsm->m->m_next;
9195 KASSERT((rsm->m != NULL),
9196 (" nrsm:%p hit at soff:%u null m",
9199 rsm->orig_m_len = rsm->m->m_len;
9201 if (rack->app_limited_needs_set)
9202 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG);
9206 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack)
9208 struct rack_sendmap *rsm;
9209 int sack_pass_fnd = 0;
9211 if (rack->r_might_revert) {
9213 * Ok we have reordering, have not sent anything, we
9214 * might want to revert the congestion state if nothing
9215 * further has SACK_PASSED on it. Let's check.
9217 * We also get here when we have DSACKs come in for
9218 * all the data that we FR'd. Note that a rxt or tlp
9219 * timer prevents this from happening.
9222 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
9223 if (rsm->r_flags & RACK_SACK_PASSED) {
9228 if (sack_pass_fnd == 0) {
9230 * We went into recovery
9231 * incorrectly due to reordering!
9235 rack->r_ent_rec_ns = 0;
9236 orig_cwnd = tp->snd_cwnd;
9237 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec;
9238 tp->snd_recover = tp->snd_una;
9239 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__);
9240 EXIT_RECOVERY(tp->t_flags);
9242 rack->r_might_revert = 0;
9246 #ifdef NETFLIX_EXP_DETECTION
9248 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz)
9250 if ((rack->do_detection || tcp_force_detection) &&
9251 tcp_sack_to_ack_thresh &&
9252 tcp_sack_to_move_thresh &&
9253 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) {
9255 * We have thresholds set to find
9256 * possible attackers and disable sack.
9259 uint64_t ackratio, moveratio, movetotal;
9262 rack_log_sad(rack, 1);
9263 ackratio = (uint64_t)(rack->r_ctl.sack_count);
9264 ackratio *= (uint64_t)(1000);
9265 if (rack->r_ctl.ack_count)
9266 ackratio /= (uint64_t)(rack->r_ctl.ack_count);
9268 /* We really should not hit here */
9271 if ((rack->sack_attack_disable == 0) &&
9272 (ackratio > rack_highest_sack_thresh_seen))
9273 rack_highest_sack_thresh_seen = (uint32_t)ackratio;
9274 movetotal = rack->r_ctl.sack_moved_extra;
9275 movetotal += rack->r_ctl.sack_noextra_move;
9276 moveratio = rack->r_ctl.sack_moved_extra;
9277 moveratio *= (uint64_t)1000;
9279 moveratio /= movetotal;
9281 /* No moves, that's pretty good */
9284 if ((rack->sack_attack_disable == 0) &&
9285 (moveratio > rack_highest_move_thresh_seen))
9286 rack_highest_move_thresh_seen = (uint32_t)moveratio;
9287 if (rack->sack_attack_disable == 0) {
9288 if ((ackratio > tcp_sack_to_ack_thresh) &&
9289 (moveratio > tcp_sack_to_move_thresh)) {
9290 /* Disable sack processing */
9291 rack->sack_attack_disable = 1;
9292 if (rack->r_rep_attack == 0) {
9293 rack->r_rep_attack = 1;
9294 counter_u64_add(rack_sack_attacks_detected, 1);
9296 if (tcp_attack_on_turns_on_logging) {
9298 * Turn on logging, used for debugging
9301 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging;
9303 /* Clamp the cwnd at flight size */
9304 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd;
9305 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
9306 rack_log_sad(rack, 2);
9309 /* We are sack-disabled, check for false positives */
9310 if ((ackratio <= tcp_restoral_thresh) ||
9311 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) {
9312 rack->sack_attack_disable = 0;
9313 rack_log_sad(rack, 3);
9314 /* Restart counting */
9315 rack->r_ctl.sack_count = 0;
9316 rack->r_ctl.sack_moved_extra = 0;
9317 rack->r_ctl.sack_noextra_move = 1;
9318 rack->r_ctl.ack_count = max(1,
9319 (bytes_this_ack / segsiz));
9321 if (rack->r_rep_reverse == 0) {
9322 rack->r_rep_reverse = 1;
9323 counter_u64_add(rack_sack_attacks_reversed, 1);
9325 /* Restore the cwnd */
9326 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd)
9327 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd;
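/*
 * Summary of the heuristic above (illustrative; the thresholds are the
 * sysctl-tunable values named in the code, not fixed numbers):
 *   ackratio  = 1000 * sack_count / ack_count
 *   moveratio = 1000 * sack_moved_extra /
 *               (sack_moved_extra + sack_noextra_move)
 * e.g. 600 SACK credits against 200 ack credits gives ackratio = 3000.
 * If both ratios exceed their thresholds, SACK processing is disabled
 * and the cwnd is clamped to the flight size until the ack ratio falls
 * back to tcp_restoral_thresh or the map shrinks under tcp_map_minimum.
 */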
9335 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end)
9341 if (SEQ_GT(end, start))
9345 if ((rack->rc_last_tlp_acked_set ) &&
9346 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) &&
9347 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) {
9349 * The DSACK is because of a TLP, so we don't
9350 * do anything with the reordering window over it, since
9351 * it was not reordering that caused the DSACK but
9352 * our previous TLP retransmit.
9354 rack_log_dsack_event(rack, 7, __LINE__, start, end);
9356 goto skip_dsack_round;
9358 if (rack->rc_last_sent_tlp_seq_valid) {
9359 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len;
9360 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) &&
9361 (SEQ_LEQ(end, l_end))) {
9363 * This dsack is from the last sent TLP, ignore it
9364 * for reordering purposes.
9366 rack_log_dsack_event(rack, 7, __LINE__, start, end);
9368 goto skip_dsack_round;
9371 if (rack->rc_dsack_round_seen == 0) {
9372 rack->rc_dsack_round_seen = 1;
9373 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max;
9374 rack->r_ctl.num_dsack++;
9375 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */
9376 rack_log_dsack_event(rack, 2, __LINE__, 0, 0);
9380 * We keep track of how many DSACK blocks we get
9381 * after a recovery incident.
9383 rack->r_ctl.dsack_byte_cnt += am;
9384 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) &&
9385 rack->r_ctl.retran_during_recovery &&
9386 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) {
9388 * False recovery; the most likely culprit is reordering. If
9389 * nothing else is missing we need to revert.
9391 rack->r_might_revert = 1;
9392 rack_handle_might_revert(rack->rc_tp, rack);
9393 rack->r_might_revert = 0;
9394 rack->r_ctl.retran_during_recovery = 0;
9395 rack->r_ctl.dsack_byte_cnt = 0;
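/*
 * The "pipe" computed below is an RFC 6675 style estimate of the bytes
 * still in flight: everything sent but not cum-acked, minus what the
 * scoreboard says was SACKed, plus bytes retransmitted into holes.
 * Worked example (illustrative numbers): snd_max - snd_una = 30000,
 * rc_sacked = 10000, rc_holes_rxt = 3000 gives pipe = 23000 bytes.
 */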
9401 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una)
9403 return (((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt);
9407 rack_compute_pipe(struct tcpcb *tp)
9409 return ((int32_t)do_rack_compute_pipe(tp,
9410 (struct tcp_rack *)tp->t_fb_ptr,
9415 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack)
9417 /* Deal with changed and PRR here (in recovery only) */
9418 uint32_t pipe, snd_una;
9420 rack->r_ctl.rc_prr_delivered += changed;
9422 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) {
9424 * It is all outstanding, we are application limited
9425 * and thus we don't need more room to send anything.
9426 * Note we use tp->snd_una here and not th_ack because
9427 * the data has not yet been cut from the sb.
9429 rack->r_ctl.rc_prr_sndcnt = 0;
9432 /* Compute prr_sndcnt */
9433 if (SEQ_GT(tp->snd_una, th_ack)) {
9434 snd_una = tp->snd_una;
9438 pipe = do_rack_compute_pipe(tp, rack, snd_una);
9439 if (pipe > tp->snd_ssthresh) {
9442 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh;
9443 if (rack->r_ctl.rc_prr_recovery_fs > 0)
9444 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs;
9446 rack->r_ctl.rc_prr_sndcnt = 0;
9447 rack_log_to_prr(rack, 9, 0, __LINE__);
9451 if (sndcnt > (long)rack->r_ctl.rc_prr_out)
9452 sndcnt -= rack->r_ctl.rc_prr_out;
9455 rack->r_ctl.rc_prr_sndcnt = sndcnt;
9456 rack_log_to_prr(rack, 10, 0, __LINE__);
9460 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out)
9461 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out);
9464 if (changed > limit)
9466 limit += ctf_fixed_maxseg(tp);
9467 if (tp->snd_ssthresh > pipe) {
9468 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit);
9469 rack_log_to_prr(rack, 11, 0, __LINE__);
9471 rack->r_ctl.rc_prr_sndcnt = min(0, limit);
9472 rack_log_to_prr(rack, 12, 0, __LINE__);
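/*
 * Sketch of the proportional rate reduction math above, in the spirit
 * of RFC 6937 (illustrative numbers, ignoring any rounding the code
 * applies): with pipe above ssthresh,
 *   sndcnt = prr_delivered * ssthresh / recovery_fs - prr_out
 * so ssthresh = 50000, a recovery flight size of 100000, 20000 bytes
 * delivered and 8000 already sent allows about 2000 more bytes out,
 * i.e. retransmissions drain at roughly half the delivery rate. Once
 * pipe drops below ssthresh, the min(ssthresh - pipe, limit) branch
 * takes over.
 */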
9478 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck)
9481 struct tcp_rack *rack;
9482 struct rack_sendmap *rsm;
9483 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1];
9484 register uint32_t th_ack;
9485 int32_t i, j, k, num_sack_blks = 0;
9486 uint32_t cts, acked, ack_point;
9487 int loop_start = 0, moved_two = 0;
9491 INP_WLOCK_ASSERT(tp->t_inpcb);
9492 if (tcp_get_flags(th) & TH_RST) {
9493 /* We don't log resets */
9496 rack = (struct tcp_rack *)tp->t_fb_ptr;
9497 cts = tcp_get_usecs(NULL);
9498 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
9500 th_ack = th->th_ack;
9501 if (rack->sack_attack_disable == 0)
9502 rack_do_decay(rack);
9503 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) {
9505 * You only get credit for
9506 * MSS and greater (and you get extra
9507 * credit for larger cum-ack moves).
9511 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp);
9512 rack->r_ctl.ack_count += ac;
9513 counter_u64_add(rack_ack_total, ac);
9515 if (rack->r_ctl.ack_count > 0xfff00000) {
9517 * reduce the number to keep us under
9520 rack->r_ctl.ack_count /= 2;
9521 rack->r_ctl.sack_count /= 2;
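/*
 * Example of the credit above (illustrative): a cum-ack that advances
 * snd_una by 4 * MSS adds 4 to ack_count, so the sack_count/ack_count
 * ratio used by the attack detection stays meaningful for stretch
 * acks. Halving both counters near 0xfff00000 only keeps the 32-bit
 * values from wrapping while preserving their ratio.
 */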
9523 if (SEQ_GT(th_ack, tp->snd_una)) {
9524 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__);
9525 tp->t_acktime = ticks;
9527 if (rsm && SEQ_GT(th_ack, rsm->r_start))
9528 changed = th_ack - rsm->r_start;
9530 rack_process_to_cumack(tp, rack, th_ack, cts, to);
9532 if ((to->to_flags & TOF_SACK) == 0) {
9533 /* We are done, nothing left and no sack. */
9534 rack_handle_might_revert(tp, rack);
9536 * For cases where we struck a dup-ack
9537 * with no SACK, add to the changes so
9538 * PRR will work right.
9540 if (dup_ack_struck && (changed == 0)) {
9541 changed += ctf_fixed_maxseg(rack->rc_tp);
9545 /* Sack block processing */
9546 if (SEQ_GT(th_ack, tp->snd_una))
9549 ack_point = tp->snd_una;
9550 for (i = 0; i < to->to_nsacks; i++) {
9551 bcopy((to->to_sacks + i * TCPOLEN_SACK),
9552 &sack, sizeof(sack));
9553 sack.start = ntohl(sack.start);
9554 sack.end = ntohl(sack.end);
9555 if (SEQ_GT(sack.end, sack.start) &&
9556 SEQ_GT(sack.start, ack_point) &&
9557 SEQ_LT(sack.start, tp->snd_max) &&
9558 SEQ_GT(sack.end, ack_point) &&
9559 SEQ_LEQ(sack.end, tp->snd_max)) {
9560 sack_blocks[num_sack_blks] = sack;
9562 } else if (SEQ_LEQ(sack.start, th_ack) &&
9563 SEQ_LEQ(sack.end, th_ack)) {
9566 was_tlp = rack_note_dsack(rack, sack.start, sack.end);
9568 * It's a D-SACK block.
9570 tcp_record_dsack(tp, sack.start, sack.end, was_tlp);
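/*
 * RFC 2883 style test: a reported block that lies entirely at or below
 * the cumulative ACK (e.g. th_ack = 5000 with a block of [3000, 4000))
 * can only describe duplicate data, so it feeds the D-SACK accounting
 * above rather than the scoreboard.
 */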
9573 if (rack->rc_dsack_round_seen) {
9574 /* Is the dsack round over? */
9575 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) {
9577 rack->rc_dsack_round_seen = 0;
9578 rack_log_dsack_event(rack, 3, __LINE__, 0, 0);
9582 * Sort the SACK blocks so we can update the rack scoreboard with
9585 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks,
9586 num_sack_blks, th->th_ack);
9587 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks);
9588 if (num_sack_blks == 0) {
9589 /* Nothing to sack (DSACKs?) */
9590 goto out_with_totals;
9592 if (num_sack_blks < 2) {
9593 /* Only one, we don't need to sort */
9596 /* Sort the sacks */
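/*
 * A simple O(n^2) swap sort by ending sequence is fine here since
 * num_sack_blks is bounded by TCP_MAX_SACK + 1 entries per ACK, so
 * anything fancier would not pay for itself.
 */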
9597 for (i = 0; i < num_sack_blks; i++) {
9598 for (j = i + 1; j < num_sack_blks; j++) {
9599 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
9600 sack = sack_blocks[i];
9601 sack_blocks[i] = sack_blocks[j];
9602 sack_blocks[j] = sack;
9607 * Now are any of the sack block ends the same (yes some
9608 * implementations send these)?
9611 if (num_sack_blks == 0)
9612 goto out_with_totals;
9613 if (num_sack_blks > 1) {
9614 for (i = 0; i < num_sack_blks; i++) {
9615 for (j = i + 1; j < num_sack_blks; j++) {
9616 if (sack_blocks[i].end == sack_blocks[j].end) {
9618 * Ok these two have the same end we
9619 * want the smallest end and then
9620 * throw away the larger and start
9623 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) {
9625 * The second block covers
9626 * more area, use that
9628 sack_blocks[i].start = sack_blocks[j].start;
9631 * Now collapse out the dup-sack and
9634 for (k = (j + 1); k < num_sack_blks; k++) {
9635 sack_blocks[j].start = sack_blocks[k].start;
9636 sack_blocks[j].end = sack_blocks[k].end;
9647 * First let's look to see if
9648 * we have retransmitted and
9649 * can use the transmit next?
9651 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
9653 SEQ_GT(sack_blocks[0].end, rsm->r_start) &&
9654 SEQ_LT(sack_blocks[0].start, rsm->r_end)) {
9656 * We probably did the FR and the next
9657 * SACK coming in continues as we would expect.
9659 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two);
9661 rack->r_wanted_output = 1;
9664 if (num_sack_blks == 1) {
9666 * This is what we would expect from
9667 * a normal implementation to happen
9668 * after we have retransmitted the FR,
9669 * i.e. the sack-filter pushes down
9670 * to 1 block and the next to be retransmitted
9671 * is the sequence in the sack block (as more
9672 * are acked). Count this as ACK'd data to boost
9673 * up the chances of recovering any false positives.
9675 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp));
9676 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp)));
9677 counter_u64_add(rack_express_sack, 1);
9678 if (rack->r_ctl.ack_count > 0xfff00000) {
9680 * reduce the number to keep us under
9683 rack->r_ctl.ack_count /= 2;
9684 rack->r_ctl.sack_count /= 2;
9686 goto out_with_totals;
9689 * Start the loop through the
9690 * rest of blocks, past the first block.
9696 /* It's a sack of some sort */
9697 rack->r_ctl.sack_count++;
9698 if (rack->r_ctl.sack_count > 0xfff00000) {
9700 * reduce the number to keep us under
9703 rack->r_ctl.ack_count /= 2;
9704 rack->r_ctl.sack_count /= 2;
9706 counter_u64_add(rack_sack_total, 1);
9707 if (rack->sack_attack_disable) {
9708 /* An attacker disablement is in place */
9709 if (num_sack_blks > 1) {
9710 rack->r_ctl.sack_count += (num_sack_blks - 1);
9711 rack->r_ctl.sack_moved_extra++;
9712 counter_u64_add(rack_move_some, 1);
9713 if (rack->r_ctl.sack_moved_extra > 0xfff00000) {
9714 rack->r_ctl.sack_moved_extra /= 2;
9715 rack->r_ctl.sack_noextra_move /= 2;
9720 rsm = rack->r_ctl.rc_sacklast;
9721 for (i = loop_start; i < num_sack_blks; i++) {
9722 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two);
9724 rack->r_wanted_output = 1;
9729 * If we did not get a SACK for at least an MSS and
9730 * had to move at all, or if we moved more than our
9731 * threshold, it counts against the "extra" move.
9733 rack->r_ctl.sack_moved_extra += moved_two;
9734 counter_u64_add(rack_move_some, 1);
9737 * else we did not have to move
9738 * any more than we would expect.
9740 rack->r_ctl.sack_noextra_move++;
9741 counter_u64_add(rack_move_none, 1);
9743 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) {
9745 * If the SACK was not a full MSS then
9746 * we add to sack_count the number of
9747 * MSSes (or possibly more than
9748 * an MSS if it's a TSO send) we had to skip by.
9750 rack->r_ctl.sack_count += moved_two;
9751 counter_u64_add(rack_sack_total, moved_two);
9754 * Now we need to set up for the next
9755 * round. First we make sure we won't
9756 * exceed the size of our uint32_t on
9757 * the various counts, and then clear out
9760 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) ||
9761 (rack->r_ctl.sack_noextra_move > 0xfff00000)) {
9762 rack->r_ctl.sack_moved_extra /= 2;
9763 rack->r_ctl.sack_noextra_move /= 2;
9765 if (rack->r_ctl.sack_count > 0xfff00000) {
9766 rack->r_ctl.ack_count /= 2;
9767 rack->r_ctl.sack_count /= 2;
9772 if (num_sack_blks > 1) {
9774 * You get an extra stroke if
9775 * you have more than one sack-blk, this
9776 * could be where we are skipping forward
9777 * and the sack-filter is still working, or
9778 * it could be an attacker constantly
9781 rack->r_ctl.sack_moved_extra++;
9782 counter_u64_add(rack_move_some, 1);
9785 #ifdef NETFLIX_EXP_DETECTION
9786 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp));
9789 /* Something changed, cancel the rack timer */
9790 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
9792 tsused = tcp_get_usecs(NULL);
9793 rsm = tcp_rack_output(tp, rack, tsused);
9794 if ((!IN_FASTRECOVERY(tp->t_flags)) &&
9796 ((rsm->r_flags & RACK_MUST_RXT) == 0)) {
9797 /* Enter recovery */
9798 entered_recovery = 1;
9799 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
9801 * When we enter recovery we need to assure we send
9804 if (rack->rack_no_prr == 0) {
9805 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
9806 rack_log_to_prr(rack, 8, 0, __LINE__);
9808 rack->r_timer_override = 1;
9810 rack->r_ctl.rc_agg_early = 0;
9811 } else if (IN_FASTRECOVERY(tp->t_flags) &&
9813 (rack->r_rr_config == 3)) {
9815 * Assure we can output and we get no
9816 * remembered pace time except the retransmit.
9818 rack->r_timer_override = 1;
9819 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
9820 rack->r_ctl.rc_resend = rsm;
9822 if (IN_FASTRECOVERY(tp->t_flags) &&
9823 (rack->rack_no_prr == 0) &&
9824 (entered_recovery == 0)) {
9825 rack_update_prr(tp, rack, changed, th_ack);
9826 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) &&
9827 ((tcp_in_hpts(rack->rc_inp) == 0) &&
9828 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) {
9830 * If you are pacing output you don't want
9834 rack->r_ctl.rc_agg_early = 0;
9835 rack->r_timer_override = 1;
9841 rack_strike_dupack(struct tcp_rack *rack)
9843 struct rack_sendmap *rsm;
9845 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
9846 while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
9847 rsm = TAILQ_NEXT(rsm, r_tnext);
9848 if (rsm->r_flags & RACK_MUST_RXT) {
9849 /* Sendmap entries that are marked to
9850 * be retransmitted do not need dupacks
9851 * struck. We get these marks for a number
9852 * of reasons (rxt timeout with no sack,
9853 * mtu change, or rwnd collapses). When
9854 * these events occur, we know we must retransmit
9855 * them and mark the sendmap entries. Dupack counting
9856 * is not needed since we are already set to retransmit
9857 * it as soon as we can.
9862 if (rsm && (rsm->r_dupack < 0xff)) {
9864 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) {
9868 * Here we see if we need to retransmit. For
9869 * a SACK type connection if enough time has passed
9870 * we will get a return of the rsm. For a non-sack
9871 * connection we will get the rsm returned if the
9872 * dupack value is 3 or more.
9874 cts = tcp_get_usecs(&tv);
9875 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts);
9876 if (rack->r_ctl.rc_resend != NULL) {
9877 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) {
9878 rack_cong_signal(rack->rc_tp, CC_NDUPACK,
9879 rack->rc_tp->snd_una, __LINE__);
9881 rack->r_wanted_output = 1;
9882 rack->r_timer_override = 1;
9883 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3);
9886 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3);
9892 rack_check_bottom_drag(struct tcpcb *tp,
9893 struct tcp_rack *rack,
9894 struct socket *so, int32_t acked)
9896 uint32_t segsiz, minseg;
9898 segsiz = ctf_fixed_maxseg(tp);
9901 if (tp->snd_max == tp->snd_una) {
9903 * We are doing dynamic pacing and we are way
9904 * under. Basically everything got acked while
9905 * we were still waiting on the pacer to expire.
9907 * This means we need to boost the b/w in
9908 * addition to any earlier boosting of
9911 rack->rc_dragged_bottom = 1;
9912 rack_validate_multipliers_at_or_above100(rack);
9914 * Let's use the segment bytes acked plus
9915 * the lowest RTT seen as the basis to
9916 * form a b/w estimate. This will be off
9917 * due to the fact that the true estimate
9918 * should be around 1/2 the time of the RTT
9919 * but we can settle for that.
9921 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) &&
9923 uint64_t bw, calc_bw, rtt;
9925 rtt = rack->r_ctl.rack_rs.rs_us_rtt;
9927 /* no us sample, is there a ms one? */
9928 if (rack->r_ctl.rack_rs.rs_rtt_lowest) {
9929 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
9931 goto no_measurement;
9935 calc_bw = bw * 1000000;
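/*
 * Units sketch for the estimate being built here (assuming, as the
 * surrounding code does, that the RTT used is in microseconds): the
 * bytes acked are scaled by 1,000,000 and then divided by the RTT,
 * yielding bytes per second. e.g. 14600 bytes acked over a 10000 usec
 * RTT works out to 1,460,000 bytes/sec before the last_max_bw cap.
 */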
9937 if (rack->r_ctl.last_max_bw &&
9938 (rack->r_ctl.last_max_bw < calc_bw)) {
9940 * If we have a last calculated max bw
9943 calc_bw = rack->r_ctl.last_max_bw;
9945 /* now plop it in */
9946 if (rack->rc_gp_filled == 0) {
9947 if (calc_bw > ONE_POINT_TWO_MEG) {
9949 * If we have no measurement
9950 * don't let us set in more than
9951 * 1.2Mbps. If we are still too
9952 * low after pacing with this we
9953 * will hopefully have a max b/w
9954 * available to sanity check things.
9956 calc_bw = ONE_POINT_TWO_MEG;
9958 rack->r_ctl.rc_rtt_diff = 0;
9959 rack->r_ctl.gp_bw = calc_bw;
9960 rack->rc_gp_filled = 1;
9961 if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
9962 rack->r_ctl.num_measurements = RACK_REQ_AVG;
9963 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
9964 } else if (calc_bw > rack->r_ctl.gp_bw) {
9965 rack->r_ctl.rc_rtt_diff = 0;
9966 if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
9967 rack->r_ctl.num_measurements = RACK_REQ_AVG;
9968 rack->r_ctl.gp_bw = calc_bw;
9969 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
9971 rack_increase_bw_mul(rack, -1, 0, 0, 1);
9972 if ((rack->gp_ready == 0) &&
9973 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
9974 /* We have enough measurements now */
9976 rack_set_cc_pacing(rack);
9977 if (rack->defer_options)
9978 rack_apply_deferred_options(rack);
9981 * For acks over 1mss we do an extra boost to simulate
9982 * where we would get 2 acks (we want 110 for the mul).
9985 rack_increase_bw_mul(rack, -1, 0, 0, 1);
9988 * zero rtt possibly? Settle for just an old increase.
9991 rack_increase_bw_mul(rack, -1, 0, 0, 1);
9993 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
9994 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)),
9996 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) &&
9997 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) &&
9998 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <=
9999 (segsiz * rack_req_segs))) {
10001 * We are doing dynamic GP pacing and
10002 * we have everything except 1MSS or less
10003 * bytes left out. We are still pacing away.
10004 * And there is data that could be sent. This
10005 * means we are inserting delayed ack time in
10006 * our measurements because we are pacing too slowly.
10008 rack_validate_multipliers_at_or_above100(rack);
10009 rack->rc_dragged_bottom = 1;
10010 rack_increase_bw_mul(rack, -1, 0, 0, 1);
10017 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount)
10020 * The fast output path is enabled and we
10021 * have moved the cumack forward. Let's see if
10022 * we can expand forward the fast path length by
10023 * that amount. What we would ideally like to
10024 * do is increase the number of bytes in the
10025 * fast path block (left_to_send) by the
10026 * acked amount. However we have to gate that by:
10028 * 1) The amount outstanding and the rwnd of the peer
10029 * (i.e. we don't want to exceed the rwnd of the peer).
10031 * 2) The amount of data left in the socket buffer (i.e.
10032 * we can't send beyond what is in the buffer).
10034 * Note that this does not take into account any increase
10035 * in the cwnd. We will only extend the fast path by
10038 uint32_t new_total, gating_val;
10040 new_total = acked_amount + rack->r_ctl.fsb.left_to_send;
10041 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)),
10042 (tp->snd_wnd - (tp->snd_max - tp->snd_una)));
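/*
 * Worked example of the gate above (illustrative numbers): with
 * sbavail = 100000, snd_wnd = 60000 and 40000 bytes outstanding,
 * gating_val = min(100000 - 40000, 60000 - 40000) = 20000. An ACK for
 * 5000 bytes may extend left_to_send only while the new total stays at
 * or below that gate; otherwise fast output is left untouched and the
 * normal output path re-evaluates.
 */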
10043 if (new_total <= gating_val) {
10044 /* We can increase left_to_send by the acked amount */
10045 counter_u64_add(rack_extended_rfo, 1);
10046 rack->r_ctl.fsb.left_to_send = new_total;
10047 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))),
10048 ("rack:%p left_to_send:%u sbavail:%u out:%u",
10049 rack, rack->r_ctl.fsb.left_to_send,
10050 sbavail(&rack->rc_inp->inp_socket->so_snd),
10051 (tp->snd_max - tp->snd_una)));
10057 rack_adjust_sendmap(struct tcp_rack *rack, struct sockbuf *sb, tcp_seq snd_una)
10060 * Here any sendmap entry that points to the
10061 * beginning mbuf must be adjusted to the correct
10062 * offset. This must be called with:
10063 * 1) The socket buffer locked
10064 * 2) snd_una adjusted to its new position.
10066 * Note that (2) implies rack_ack_received has also been called.
10069 * We grab the first mbuf in the socket buffer and
10070 * then go through the front of the sendmap, recalculating
10071 * the stored offset for any sendmap entry that has
10072 * that mbuf. We must use the sb functions to do this
10073 * since it's possible an add was done as well as
10074 * the subtraction we may have just completed. This should
10075 * not be a penalty though, since we just referenced the sb
10076 * to go in and trim off the mbufs that we freed (of course
10077 * there will be a penalty for the sendmap references though).
10080 struct rack_sendmap *rsm;
10082 SOCKBUF_LOCK_ASSERT(sb);
10084 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
10085 if ((rsm == NULL) || (m == NULL)) {
10086 /* Nothing outstanding */
10089 while (rsm->m && (rsm->m == m)) {
10090 /* one to adjust */
10095 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff);
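/*
 * sbsndmbuf() walks the send buffer to byte offset
 * (rsm->r_start - snd_una) and returns both the mbuf holding that byte
 * and, via soff, the offset within it. Comparing that fresh answer to
 * the cached rsm->soff is how the check below notices any drift after
 * the trim that just happened.
 */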
10096 if (rsm->orig_m_len != m->m_len) {
10097 rack_adjust_orig_mlen(rsm);
10099 if (rsm->soff != soff) {
10101 * This is not a fatal error, we anticipate it
10102 * might happen (the else code), so we count it here
10103 * so that under invariant we can see that it really
10106 counter_u64_add(rack_adjust_map_bw, 1);
10111 rsm->orig_m_len = rsm->m->m_len;
10113 rsm->orig_m_len = 0;
10115 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff);
10117 rsm->orig_m_len = rsm->m->m_len;
10119 rsm->orig_m_len = 0;
10121 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
10129 * Return value of 1: we do not need to call rack_process_data().
10130 * Return value of 0: rack_process_data can be called.
10131 * For ret_val, if it's 0 the TCP is locked; if it's non-zero
10132 * it's unlocked and probably unsafe to touch the TCB.
10135 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
10136 struct tcpcb *tp, struct tcpopt *to,
10137 uint32_t tiwin, int32_t tlen,
10138 int32_t * ofia, int32_t thflags, int32_t *ret_val)
10140 int32_t ourfinisacked = 0;
10141 int32_t nsegs, acked_amount;
10143 struct mbuf *mfree;
10144 struct tcp_rack *rack;
10145 int32_t under_pacing = 0;
10146 int32_t recovery = 0;
10148 rack = (struct tcp_rack *)tp->t_fb_ptr;
10149 if (SEQ_GT(th->th_ack, tp->snd_max)) {
10150 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val,
10151 &rack->r_ctl.challenge_ack_ts,
10152 &rack->r_ctl.challenge_ack_cnt);
10153 rack->r_wanted_output = 1;
10156 if (rack->gp_ready &&
10157 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
10160 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
10161 int in_rec, dup_ack_struck = 0;
10163 in_rec = IN_FASTRECOVERY(tp->t_flags);
10164 if (rack->rc_in_persist) {
10165 tp->t_rxtshift = 0;
10166 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
10167 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
10169 if ((th->th_ack == tp->snd_una) &&
10170 (tiwin == tp->snd_wnd) &&
10171 ((to->to_flags & TOF_SACK) == 0)) {
10172 rack_strike_dupack(rack);
10173 dup_ack_struck = 1;
10175 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), dup_ack_struck);
10177 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
10179 * Old ack, behind (or duplicate to) the last one rcv'd
10180 * Note: We mark reordering as occurring if it's
10181 * less than and we have not closed our window.
10183 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) {
10184 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
10189 * If we reach this point, ACK is not a duplicate, i.e., it ACKs
10190 * something we sent.
10192 if (tp->t_flags & TF_NEEDSYN) {
10194 * T/TCP: Connection was half-synchronized, and our SYN has
10195 * been ACK'd (so connection is now fully synchronized). Go
10196 * to non-starred state, increment snd_una for ACK of SYN,
10197 * and check if we can do window scaling.
10199 tp->t_flags &= ~TF_NEEDSYN;
10201 /* Do window scaling? */
10202 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
10203 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
10204 tp->rcv_scale = tp->request_r_scale;
10205 /* Send window already scaled. */
10208 nsegs = max(1, m->m_pkthdr.lro_nsegs);
10209 INP_WLOCK_ASSERT(tp->t_inpcb);
10211 acked = BYTES_THIS_ACK(tp, th);
10214 * Any time we move the cum-ack forward, clear the
10215 * keep-alive tied probe-not-answered flag. The
10216 * persist code clears its own on entry.
10218 rack->probe_not_answered = 0;
10220 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
10221 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
10223 * If we just performed our first retransmit, and the ACK arrives
10224 * within our recovery window, then it was a mistake to do the
10225 * retransmit in the first place. Recover our original cwnd and
10226 * ssthresh, and proceed to transmit where we left off.
10228 if ((tp->t_flags & TF_PREVVALID) &&
10229 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
10230 tp->t_flags &= ~TF_PREVVALID;
10231 if (tp->t_rxtshift == 1 &&
10232 (int)(ticks - tp->t_badrxtwin) < 0)
10233 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__);
10236 /* assure we are not backed off */
10237 tp->t_rxtshift = 0;
10238 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
10239 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
10240 rack->rc_tlp_in_progress = 0;
10241 rack->r_ctl.rc_tlp_cnt_out = 0;
10243 * If it is the RXT timer we want to
10244 * stop it, so we can restart a TLP.
10246 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
10247 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
10248 #ifdef NETFLIX_HTTP_LOGGING
10249 tcp_http_check_for_comp(rack->rc_tp, th->th_ack);
10253 * If we have a timestamp reply, update smoothed round trip time. If
10254 * no timestamp is present but transmit timer is running and timed
10255 * sequence number was acked, update smoothed round trip time. Since
10256 * we now have an rtt measurement, cancel the timer backoff (cf.,
10257 * Phil Karn's retransmit alg.). Recompute the initial retransmit
10260 * Some boxes send broken timestamp replies during the SYN+ACK
10261 * phase, ignore timestamps of 0 or we could calculate a huge RTT
10262 * and blow up the retransmit timer.
10265 * If all outstanding data is acked, stop retransmit timer and
10266 * remember to restart (more output or persist). If there is more
10267 * data to be acked, restart retransmit timer, using current
10268 * (possibly backed-off) value.
10272 *ofia = ourfinisacked;
10275 if (IN_RECOVERY(tp->t_flags)) {
10276 if (SEQ_LT(th->th_ack, tp->snd_recover) &&
10277 (SEQ_LT(th->th_ack, tp->snd_max))) {
10278 tcp_rack_partialack(tp);
10280 rack_post_recovery(tp, th->th_ack);
10285 * Let the congestion control algorithm update congestion control
10286 * related information. This typically means increasing the
10287 * congestion window.
10289 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery);
10290 SOCKBUF_LOCK(&so->so_snd);
10291 acked_amount = min(acked, (int)sbavail(&so->so_snd));
10292 tp->snd_wnd -= acked_amount;
10293 mfree = sbcut_locked(&so->so_snd, acked_amount);
10294 if ((sbused(&so->so_snd) == 0) &&
10295 (acked > acked_amount) &&
10296 (tp->t_state >= TCPS_FIN_WAIT_1) &&
10297 (tp->t_flags & TF_SENTFIN)) {
10299 * We must be sure our fin
10300 * was sent and acked (we can be
10301 * in FIN_WAIT_1 without having
10306 tp->snd_una = th->th_ack;
10307 if (acked_amount && sbavail(&so->so_snd))
10308 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una);
10309 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
10310 /* NB: sowwakeup_locked() does an implicit unlock. */
10311 sowwakeup_locked(so);
10313 if (SEQ_GT(tp->snd_una, tp->snd_recover))
10314 tp->snd_recover = tp->snd_una;
10316 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
10317 tp->snd_nxt = tp->snd_una;
10319 if (under_pacing &&
10320 (rack->use_fixed_rate == 0) &&
10321 (rack->in_probe_rtt == 0) &&
10322 rack->rc_gp_dyn_mul &&
10323 rack->rc_always_pace) {
10324 /* Check if we are dragging bottom */
10325 rack_check_bottom_drag(tp, rack, so, acked);
10327 if (tp->snd_una == tp->snd_max) {
10328 /* Nothing left outstanding */
10329 tp->t_flags &= ~TF_PREVVALID;
10330 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
10331 rack->r_ctl.retran_during_recovery = 0;
10332 rack->r_ctl.dsack_byte_cnt = 0;
10333 if (rack->r_ctl.rc_went_idle_time == 0)
10334 rack->r_ctl.rc_went_idle_time = 1;
10335 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
10336 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
10338 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
10339 /* Set need output so persist might get set */
10340 rack->r_wanted_output = 1;
10341 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
10342 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
10343 (sbavail(&so->so_snd) == 0) &&
10344 (tp->t_flags2 & TF2_DROP_AF_DATA)) {
10346 * The socket was gone and the
10347 * peer sent data (now or in the past), time to
10351 /* tcp_close will kill the inp pre-log the Reset */
10352 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
10353 tp = tcp_close(tp);
10354 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
10359 *ofia = ourfinisacked;
10365 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line,
10366 int dir, uint32_t flags, struct rack_sendmap *rsm)
10368 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
10369 union tcp_log_stackspecific log;
10372 memset(&log, 0, sizeof(log));
10373 log.u_bbr.flex1 = cnt;
10374 log.u_bbr.flex2 = split;
10375 log.u_bbr.flex3 = out;
10376 log.u_bbr.flex4 = line;
10377 log.u_bbr.flex5 = rack->r_must_retran;
10378 log.u_bbr.flex6 = flags;
10379 log.u_bbr.flex7 = rack->rc_has_collapsed;
10380 log.u_bbr.flex8 = dir; /*
10381 * 1 is collapsed, 0 is uncollapsed,
10382 * 2 is log of a rsm being marked, 3 is a split.
10385 log.u_bbr.rttProp = 0;
10387 log.u_bbr.rttProp = (uint64_t)rsm;
10388 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
10389 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
10390 TCP_LOG_EVENTP(rack->rc_tp, NULL,
10391 &rack->rc_inp->inp_socket->so_rcv,
10392 &rack->rc_inp->inp_socket->so_snd,
10393 TCP_RACK_LOG_COLLAPSE, 0,
10394 0, &log, false, &tv);
10399 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, int line)
10402 * Here all we do is mark the collapsed point and set the flag.
10403 * This may happen again and again, but there is no
10404 * sense splitting our map until we know where the
10405 * peer finally lands in the collapse.
10407 rack_trace_point(rack, RACK_TP_COLLAPSED_WND);
10408 if ((rack->rc_has_collapsed == 0) ||
10409 (rack->r_ctl.last_collapse_point != (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)))
10410 counter_u64_add(rack_collapsed_win_seen, 1);
10411 rack->r_ctl.last_collapse_point = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd;
10412 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max;
10413 rack->rc_has_collapsed = 1;
10414 rack->r_collapse_point_valid = 1;
10415 rack_log_collapse(rack, 0, 0, rack->r_ctl.last_collapse_point, line, 1, 0, NULL);
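/*
 * Example of a collapse (illustrative numbers): snd_una = 1000,
 * snd_max = 9000 and the peer shrinks its window so that
 * snd_una + snd_wnd = 5000. The collapse point recorded above is 5000;
 * everything from there to snd_max now sits beyond the advertised
 * window, and rack_un_collapse_window() below will mark those bytes
 * RACK_RWND_COLLAPSED (splitting an rsm at 5000 if needed) once the
 * window opens back up.
 */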
10419 rack_un_collapse_window(struct tcp_rack *rack, int line)
10421 struct rack_sendmap *nrsm, *rsm, fe;
10422 int cnt = 0, split = 0;
10424 struct rack_sendmap *insret;
10427 memset(&fe, 0, sizeof(fe));
10428 rack->rc_has_collapsed = 0;
10429 fe.r_start = rack->r_ctl.last_collapse_point;
10430 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
10432 /* Nothing to do, maybe the peer ack'ed it all */
10433 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL);
10436 /* Now do we need to split this one? */
10437 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) {
10438 rack_log_collapse(rack, rsm->r_start, rsm->r_end,
10439 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm);
10440 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
10441 if (nrsm == NULL) {
10442 /* We can't get a rsm, mark all? */
10448 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point);
10450 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
10452 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
10453 if (insret != NULL) {
10454 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
10455 nrsm, insret, rack, rsm);
10458 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT,
10459 rack->r_ctl.last_collapse_point, __LINE__);
10460 if (rsm->r_in_tmap) {
10461 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
10462 nrsm->r_in_tmap = 1;
10465 * Set in the new RSM as the
10466 * collapsed starting point
10471 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) {
10472 nrsm->r_flags |= RACK_RWND_COLLAPSED;
10473 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm);
10477 counter_u64_add(rack_collapsed_win, 1);
10479 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL);
10483 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack,
10484 int32_t tlen, int32_t tfo_syn)
10486 if (DELAY_ACK(tp, tlen) || tfo_syn) {
10487 if (rack->rc_dack_mode &&
10489 (rack->rc_dack_toggle == 1)) {
10490 goto no_delayed_ack;
10492 rack_timer_cancel(tp, rack,
10493 rack->r_ctl.rc_rcvtime, __LINE__);
10494 tp->t_flags |= TF_DELACK;
10497 rack->r_wanted_output = 1;
10498 tp->t_flags |= TF_ACKNOW;
10499 if (rack->rc_dack_mode) {
10500 if (tp->t_flags & TF_DELACK)
10501 rack->rc_dack_toggle = 1;
10503 rack->rc_dack_toggle = 0;
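/*
 * A sketch of the intent of the toggle above: with rc_dack_mode on,
 * rc_dack_toggle keeps qualifying segments from being delayed back to
 * back; when the toggle is already set we fall through to an immediate
 * ACK and the toggle is then reset according to whether a delayed ACK
 * is still pending.
 */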
10509 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack)
10512 * If fast output is in progress, let's validate that
10513 * the new window did not shrink on us; if it did,
10514 * make it so fast output ends.
10516 if (rack->r_fast_output) {
10520 * Calculate what we will send if left as is
10521 * and compare that to our send window.
10523 out = ctf_outstanding(tp);
10524 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) {
10525 /* ok we have an issue */
10526 if (out >= tp->snd_wnd) {
10527 /* Turn off fast output, the window is met or collapsed */
10528 rack->r_fast_output = 0;
10530 /* we have some room left */
10531 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out;
10532 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) {
10533 /* If not at least 1 full segment, never mind */
10534 rack->r_fast_output = 0;
10543 * Return value of 1, the TCB is unlocked and most
10544 * likely gone, return value of 0, the TCP is still
10548 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
10549 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
10550 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
10553 * Update window information. Don't look at window if no ACK: TAC's
10554 * send garbage on first SYN.
10558 struct tcp_rack *rack;
10560 rack = (struct tcp_rack *)tp->t_fb_ptr;
10561 INP_WLOCK_ASSERT(tp->t_inpcb);
10562 nsegs = max(1, m->m_pkthdr.lro_nsegs);
10563 if ((thflags & TH_ACK) &&
10564 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
10565 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
10566 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
10567 /* keep track of pure window updates */
10569 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
10570 KMOD_TCPSTAT_INC(tcps_rcvwinupd);
10571 tp->snd_wnd = tiwin;
10572 rack_validate_fo_sendwin_up(tp, rack);
10573 tp->snd_wl1 = th->th_seq;
10574 tp->snd_wl2 = th->th_ack;
10575 if (tp->snd_wnd > tp->max_sndwnd)
10576 tp->max_sndwnd = tp->snd_wnd;
10577 rack->r_wanted_output = 1;
10578 } else if (thflags & TH_ACK) {
10579 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) {
10580 tp->snd_wnd = tiwin;
10581 rack_validate_fo_sendwin_up(tp, rack);
10582 tp->snd_wl1 = th->th_seq;
10583 tp->snd_wl2 = th->th_ack;
10586 if (tp->snd_wnd < ctf_outstanding(tp))
10587 /* The peer collapsed the window */
10588 rack_collapsed_window(rack, ctf_outstanding(tp), __LINE__);
10589 else if (rack->rc_has_collapsed)
10590 rack_un_collapse_window(rack, __LINE__);
10591 if ((rack->r_collapse_point_valid) &&
10592 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point)))
10593 rack->r_collapse_point_valid = 0;
10594 /* Was persist timer active and now we have window space? */
10595 if ((rack->rc_in_persist != 0) &&
10596 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
10597 rack->r_ctl.rc_pace_min_segs))) {
10598 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime);
10599 tp->snd_nxt = tp->snd_max;
10600 /* Make sure we output to start the timer */
10601 rack->r_wanted_output = 1;
10603 /* Do we enter persists? */
10604 if ((rack->rc_in_persist == 0) &&
10605 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
10606 TCPS_HAVEESTABLISHED(tp->t_state) &&
10607 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
10608 sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
10609 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
10611 * Here the rwnd is less than
10612 * the pacing size, we are established,
10613 * nothing is outstanding, and there is
10614 * data to send. Enter persists.
10616 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
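/*
 * Illustrative numbers for the enter/exit persist thresholds used
 * here: with rc_high_rwnd = 1000000 and rc_pace_min_segs = 4380 the
 * threshold is min(500000, 4380) = 4380 bytes. A peer window of 2000
 * bytes with data queued and nothing outstanding enters persist; once
 * the window grows back to 4380 or more we exit persist and force an
 * output so the timers restart.
 */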
10618 if (tp->t_flags2 & TF2_DROP_AF_DATA) {
10623 * don't process the URG bit, ignore them drag
10626 tp->rcv_up = tp->rcv_nxt;
10627 INP_WLOCK_ASSERT(tp->t_inpcb);
10630 * Process the segment text, merging it into the TCP sequencing
10631 * queue, and arranging for acknowledgment of receipt if necessary.
10632 * This process logically involves adjusting tp->rcv_wnd as data is
10633 * presented to the user (this happens in tcp_usrreq.c, case
10634 * PRU_RCVD). If a FIN has already been received on this connection
10635 * then we just ignore the text.
10637 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
10638 IS_FASTOPEN(tp->t_flags));
10639 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) &&
10640 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
10641 tcp_seq save_start = th->th_seq;
10642 tcp_seq save_rnxt = tp->rcv_nxt;
10643 int save_tlen = tlen;
10645 m_adj(m, drop_hdrlen); /* delayed header drop */
10647 * Insert segment which includes th into TCP reassembly
10648 * queue with control block tp. Set thflags to whether
10649 * reassembly now includes a segment with FIN. This handles
10650 * the common case inline (segment is the next to be
10651 * received on an established connection, and the queue is
10652 * empty), avoiding linkage into and removal from the queue
10653 * and repetition of various conversions. Set DELACK for
10654 * segments received in order, but ack immediately when
10655 * segments are out of order (so fast retransmit can work).
10657 if (th->th_seq == tp->rcv_nxt &&
10659 (TCPS_HAVEESTABLISHED(tp->t_state) ||
10661 #ifdef NETFLIX_SB_LIMITS
10662 u_int mcnt, appended;
10664 if (so->so_rcv.sb_shlim) {
10665 mcnt = m_memcnt(m);
10667 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
10668 CFO_NOSLEEP, NULL) == false) {
10669 counter_u64_add(tcp_sb_shlim_fails, 1);
10675 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn);
10676 tp->rcv_nxt += tlen;
10678 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
10679 (tp->t_fbyte_in == 0)) {
10680 tp->t_fbyte_in = ticks;
10681 if (tp->t_fbyte_in == 0)
10682 tp->t_fbyte_in = 1;
10683 if (tp->t_fbyte_out && tp->t_fbyte_in)
10684 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
10686 thflags = tcp_get_flags(th) & TH_FIN;
10687 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
10688 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
10689 SOCKBUF_LOCK(&so->so_rcv);
10690 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
10693 #ifdef NETFLIX_SB_LIMITS
10696 sbappendstream_locked(&so->so_rcv, m, 0);
10698 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1);
10699 /* NB: sorwakeup_locked() does an implicit unlock. */
10700 sorwakeup_locked(so);
10701 #ifdef NETFLIX_SB_LIMITS
10702 if (so->so_rcv.sb_shlim && appended != mcnt)
10703 counter_fo_release(so->so_rcv.sb_shlim,
10708 * XXX: Due to the header drop above "th" is
10709 * theoretically invalid by now. Fortunately
10710 * m_adj() doesn't actually free any mbufs when
10711 * trimming from the head.
10713 tcp_seq temp = save_start;
10715 thflags = tcp_reass(tp, th, &temp, &tlen, m);
10716 tp->t_flags |= TF_ACKNOW;
10717 if (tp->t_flags & TF_WAKESOR) {
10718 tp->t_flags &= ~TF_WAKESOR;
10719 /* NB: sorwakeup_locked() does an implicit unlock. */
10720 sorwakeup_locked(so);
10723 if ((tp->t_flags & TF_SACK_PERMIT) &&
10725 TCPS_HAVEESTABLISHED(tp->t_state)) {
10726 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
10728 * DSACK actually handled in the fastpath
10731 RACK_OPTS_INC(tcp_sack_path_1);
10732 tcp_update_sack_list(tp, save_start,
10733 save_start + save_tlen);
10734 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
10735 if ((tp->rcv_numsacks >= 1) &&
10736 (tp->sackblks[0].end == save_start)) {
10738 * Partial overlap, recorded at todrop
10741 RACK_OPTS_INC(tcp_sack_path_2a);
10742 tcp_update_sack_list(tp,
10743 tp->sackblks[0].start,
10744 tp->sackblks[0].end);
10746 RACK_OPTS_INC(tcp_sack_path_2b);
10747 tcp_update_dsack_list(tp, save_start,
10748 save_start + save_tlen);
10750 } else if (tlen >= save_tlen) {
10751 /* Update of sackblks. */
10752 RACK_OPTS_INC(tcp_sack_path_3);
10753 tcp_update_dsack_list(tp, save_start,
10754 save_start + save_tlen);
10755 } else if (tlen > 0) {
10756 RACK_OPTS_INC(tcp_sack_path_4);
10757 tcp_update_dsack_list(tp, save_start,
10758 save_start + tlen);
10763 thflags &= ~TH_FIN;
10767 * If FIN is received, ACK the FIN and let the user know that the
10768 * connection is closing.
10770 if (thflags & TH_FIN) {
10771 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
10772 /* The socket upcall is handled by socantrcvmore. */
10775 * If connection is half-synchronized (i.e. NEEDSYN
10776 * flag on) then delay ACK, so it may be piggybacked
10777 * when SYN is sent. Otherwise, since we received a
10778 * FIN then no more input can be expected, send ACK
10781 if (tp->t_flags & TF_NEEDSYN) {
10782 rack_timer_cancel(tp, rack,
10783 rack->r_ctl.rc_rcvtime, __LINE__);
10784 tp->t_flags |= TF_DELACK;
10786 tp->t_flags |= TF_ACKNOW;
10790 switch (tp->t_state) {
10792 * In SYN_RECEIVED and ESTABLISHED STATES enter the
10793 * CLOSE_WAIT state.
10795 case TCPS_SYN_RECEIVED:
10796 tp->t_starttime = ticks;
10798 case TCPS_ESTABLISHED:
10799 rack_timer_cancel(tp, rack,
10800 rack->r_ctl.rc_rcvtime, __LINE__);
10801 tcp_state_change(tp, TCPS_CLOSE_WAIT);
10805 * If still in FIN_WAIT_1 STATE FIN has not been
10806 * acked so enter the CLOSING state.
10808 case TCPS_FIN_WAIT_1:
10809 rack_timer_cancel(tp, rack,
10810 rack->r_ctl.rc_rcvtime, __LINE__);
10811 tcp_state_change(tp, TCPS_CLOSING);
10815 * In FIN_WAIT_2 state enter the TIME_WAIT state,
10816 * starting the time-wait timer, turning off the
10817 * other standard timers.
10819 case TCPS_FIN_WAIT_2:
10820 rack_timer_cancel(tp, rack,
10821 rack->r_ctl.rc_rcvtime, __LINE__);
10827 * Return any desired output.
10829 if ((tp->t_flags & TF_ACKNOW) ||
10830 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) {
10831 rack->r_wanted_output = 1;
10833 INP_WLOCK_ASSERT(tp->t_inpcb);
10838 * Here nothing is really faster, it's just that we
10839 * have broken out the fast-data path also just like
10843 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so,
10844 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10845 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos)
10848 int32_t newsize = 0; /* automatic sockbuf scaling */
10849 struct tcp_rack *rack;
10850 #ifdef NETFLIX_SB_LIMITS
10851 u_int mcnt, appended;
10855 * The size of tcp_saveipgen must be the size of the max ip header,
10858 u_char tcp_saveipgen[IP6_HDR_LEN];
10859 struct tcphdr tcp_savetcp;
10864 * If last ACK falls within this segment's sequence numbers, record
10865 * the timestamp. NOTE that the test is modified according to the
10866 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
10868 if (__predict_false(th->th_seq != tp->rcv_nxt)) {
10871 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
10874 if (tiwin && tiwin != tp->snd_wnd) {
10877 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) {
10880 if (__predict_false((to->to_flags & TOF_TS) &&
10881 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) {
10884 if (__predict_false((th->th_ack != tp->snd_una))) {
10887 if (__predict_false(tlen > sbspace(&so->so_rcv))) {
10890 if ((to->to_flags & TOF_TS) != 0 &&
10891 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
10892 tp->ts_recent_age = tcp_ts_getticks();
10893 tp->ts_recent = to->to_tsval;
10895 rack = (struct tcp_rack *)tp->t_fb_ptr;
10897 * This is a pure, in-sequence data packet with nothing on the
10898 * reassembly queue and we have enough buffer space to take it.
10900 nsegs = max(1, m->m_pkthdr.lro_nsegs);
10902 #ifdef NETFLIX_SB_LIMITS
10903 if (so->so_rcv.sb_shlim) {
10904 mcnt = m_memcnt(m);
10906 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
10907 CFO_NOSLEEP, NULL) == false) {
10908 counter_u64_add(tcp_sb_shlim_fails, 1);
10914 /* Clean receiver SACK report if present */
10915 if (tp->rcv_numsacks)
10916 tcp_clean_sackreport(tp);
10917 KMOD_TCPSTAT_INC(tcps_preddat);
10918 tp->rcv_nxt += tlen;
10920 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
10921 (tp->t_fbyte_in == 0)) {
10922 tp->t_fbyte_in = ticks;
10923 if (tp->t_fbyte_in == 0)
10924 tp->t_fbyte_in = 1;
10925 if (tp->t_fbyte_out && tp->t_fbyte_in)
10926 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
10929 * Pull snd_wl1 up to prevent seq wrap relative to th_seq.
10931 tp->snd_wl1 = th->th_seq;
10933 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt.
10935 tp->rcv_up = tp->rcv_nxt;
10936 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
10937 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
10939 if (so->so_options & SO_DEBUG)
10940 tcp_trace(TA_INPUT, ostate, tp,
10941 (void *)tcp_saveipgen, &tcp_savetcp, 0);
10943 newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
10945 /* Add data to socket buffer. */
10946 SOCKBUF_LOCK(&so->so_rcv);
10947 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
10951 * Set new socket buffer size. Give up when limit is
10955 if (!sbreserve_locked(so, SO_RCV, newsize, NULL))
10956 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
10957 m_adj(m, drop_hdrlen); /* delayed header drop */
10958 #ifdef NETFLIX_SB_LIMITS
10961 sbappendstream_locked(&so->so_rcv, m, 0);
10962 ctf_calc_rwin(so, tp);
10964 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1);
10965 /* NB: sorwakeup_locked() does an implicit unlock. */
10966 sorwakeup_locked(so);
10967 #ifdef NETFLIX_SB_LIMITS
10968 if (so->so_rcv.sb_shlim && mcnt != appended)
10969 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended);
10971 rack_handle_delayed_ack(tp, rack, tlen, 0);
10972 if (tp->snd_una == tp->snd_max)
10973 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
10978 * This subfunction is used to try to highly optimize the
10979 * fast path. We again allow window updates that are
10980 * in sequence to remain in the fast-path. We also add
10981 * in the __predict's to attempt to help the compiler.
10982 * Note that if we return a 0, then we can *not* process
10983 * it and the caller should push the packet into the
10987 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
10988 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10989 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts)
10995 * The size of tcp_saveipgen must be the size of the max ip header,
10998 u_char tcp_saveipgen[IP6_HDR_LEN];
10999 struct tcphdr tcp_savetcp;
11002 int32_t under_pacing = 0;
11003 struct tcp_rack *rack;
11005 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
11006 /* Old ack, behind (or duplicate to) the last one rcv'd */
11009 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) {
11010 /* Above what we have sent? */
11013 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
11014 /* We are retransmitting */
11017 if (__predict_false(tiwin == 0)) {
11021 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) {
11022 /* We need a SYN or a FIN, unlikely.. */
11025 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) {
11026 /* Timestamp is behind .. old ack with seq wrap? */
11029 if (__predict_false(IN_RECOVERY(tp->t_flags))) {
11030 /* Still recovering */
11033 rack = (struct tcp_rack *)tp->t_fb_ptr;
11034 if (rack->r_ctl.rc_sacked) {
11035 /* We have sack holes on our scoreboard */
11038 /* Ok if we reach here, we can process a fast-ack */
11039 if (rack->gp_ready &&
11040 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
11043 nsegs = max(1, m->m_pkthdr.lro_nsegs);
11044 rack_log_ack(tp, to, th, 0, 0);
11045 /* Did the window get updated? */
11046 if (tiwin != tp->snd_wnd) {
11047 tp->snd_wnd = tiwin;
11048 rack_validate_fo_sendwin_up(tp, rack);
11049 tp->snd_wl1 = th->th_seq;
11050 if (tp->snd_wnd > tp->max_sndwnd)
11051 tp->max_sndwnd = tp->snd_wnd;
11053 /* Do we exit persists? */
11054 if ((rack->rc_in_persist != 0) &&
11055 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
11056 rack->r_ctl.rc_pace_min_segs))) {
11057 rack_exit_persist(tp, rack, cts);
11059 /* Do we enter persists? */
11060 if ((rack->rc_in_persist == 0) &&
11061 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
11062 TCPS_HAVEESTABLISHED(tp->t_state) &&
11063 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
11064 sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
11065 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
11067 * Here the rwnd is less than
11068 * the pacing size, we are established,
11069 * nothing is outstanding, and there is
11070 * data to send. Enter persists.
11072 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
11075 * If last ACK falls within this segment's sequence numbers, record
11076 * the timestamp. NOTE that the test is modified according to the
11077 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
11079 if ((to->to_flags & TOF_TS) != 0 &&
11080 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
11081 tp->ts_recent_age = tcp_ts_getticks();
11082 tp->ts_recent = to->to_tsval;
11085 * This is a pure ack for outstanding data.
11087 KMOD_TCPSTAT_INC(tcps_predack);
11090 * "bad retransmit" recovery.
11092 if ((tp->t_flags & TF_PREVVALID) &&
11093 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
11094 tp->t_flags &= ~TF_PREVVALID;
11095 if (tp->t_rxtshift == 1 &&
11096 (int)(ticks - tp->t_badrxtwin) < 0)
11097 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__);
11100 * Recalculate the transmit timer / rtt.
11102 * Some boxes send broken timestamp replies during the SYN+ACK
11103 * phase, ignore timestamps of 0 or we could calculate a huge RTT
11104 * and blow up the retransmit timer.
11106 acked = BYTES_THIS_ACK(tp, th);
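/*
 * acked is the number of bytes this ACK newly acknowledges;
 * BYTES_THIS_ACK() is essentially th->th_ack - tp->snd_una.
 */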
11109 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
11110 hhook_run_tcp_est_in(tp, th, to);
11112 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
11113 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
11115 struct mbuf *mfree;
11117 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0);
11118 SOCKBUF_LOCK(&so->so_snd);
11119 mfree = sbcut_locked(&so->so_snd, acked);
11120 tp->snd_una = th->th_ack;
11121 /* Note we want to hold the sb lock through the sendmap adjust */
11122 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una);
11123 /* Wake up the socket if we have room to write more */
11124 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
11125 sowwakeup_locked(so);
11127 tp->t_rxtshift = 0;
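/*
 * New data was acked: reset the backoff state and recompute the RTO
 * from the current srtt/rttvar estimate, clamped between
 * rack_rto_min and rack_rto_max with the configured timer slop
 * applied.
 */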
11128 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
11129 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
11130 rack->rc_tlp_in_progress = 0;
11131 rack->r_ctl.rc_tlp_cnt_out = 0;
11133 * If it is the RXT timer we want to
11134 * stop it, so we can restart a TLP.
11136 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
11137 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
11138 #ifdef NETFLIX_HTTP_LOGGING
11139 tcp_http_check_for_comp(rack->rc_tp, th->th_ack);
11143 * Let the congestion control algorithm update congestion control
11144 * related information. This typically means increasing the
11145 * congestion window.
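/*
 * Roughly: if the peer's advertised window has shrunk below what is
 * already outstanding, the window has "collapsed" and the affected
 * portion of the sendmap is marked for later retransmission; the
 * marking is undone once the window opens back up past the recorded
 * collapse point.
 */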
11147 if (tp->snd_wnd < ctf_outstanding(tp)) {
11148 /* The peer collapsed the window */
11149 rack_collapsed_window(rack, ctf_outstanding(tp), __LINE__);
11150 } else if (rack->rc_has_collapsed)
11151 rack_un_collapse_window(rack, __LINE__);
11152 if ((rack->r_collapse_point_valid) &&
11153 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point)))
11154 rack->r_collapse_point_valid = 0;
11156 * Pull snd_wl2 up to prevent seq wrap relative to th_ack.
11158 tp->snd_wl2 = th->th_ack;
11161 /* ND6_HINT(tp); *//* Some progress has been made. */
11164 * If all outstanding data are acked, stop retransmit timer,
11165 * otherwise restart timer using current (possibly backed-off)
11166 * value. If process is waiting for space, wakeup/selwakeup/signal.
11167 * If data are ready to send, let tcp_output decide between more
11168 * output or persist.
11171 if (so->so_options & SO_DEBUG)
11172 tcp_trace(TA_INPUT, ostate, tp,
11173 (void *)tcp_saveipgen,
11174 &tcp_savetcp, 0);
11176 if (under_pacing &&
11177 (rack->use_fixed_rate == 0) &&
11178 (rack->in_probe_rtt == 0) &&
11179 rack->rc_gp_dyn_mul &&
11180 rack->rc_always_pace) {
11181 /* Check if we are dragging bottom */
11182 rack_check_bottom_drag(tp, rack, so, acked);
11184 if (tp->snd_una == tp->snd_max) {
11185 tp->t_flags &= ~TF_PREVVALID;
11186 rack->r_ctl.retran_during_recovery = 0;
11187 rack->r_ctl.dsack_byte_cnt = 0;
11188 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
11189 if (rack->r_ctl.rc_went_idle_time == 0)
11190 rack->r_ctl.rc_went_idle_time = 1;
11191 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
11192 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
11194 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
11196 if (acked && rack->r_fast_output)
11197 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked);
11198 if (sbavail(&so->so_snd)) {
11199 rack->r_wanted_output = 1;
11205 * Return value of 1, the TCB is unlocked and most
11206 * likely gone, return value of 0, the TCP is still locked.
11210 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
11211 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11212 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11214 int32_t ret_val = 0;
11216 int32_t ourfinisacked = 0;
11217 struct tcp_rack *rack;
11219 ctf_calc_rwin(so, tp);
11221 * If the state is SYN_SENT: if seg contains an ACK, but not for our
11222 * SYN, drop the input. If seg contains a RST, then drop the
11223 * connection. If seg does not contain SYN, then drop it. Otherwise
11224 * this is an acceptable SYN segment: initialize tp->rcv_nxt and
11225 * tp->irs; if seg contains an ack then advance tp->snd_una; if seg
11226 * contains an ECE and ECN support is enabled, the stream is ECN
11227 * capable. If SYN has been acked change to ESTABLISHED, else to the
11228 * SYN_RCVD state; arrange for segment to be acked (eventually) and
11229 * continue processing rest of data/controls.
11231 if ((thflags & TH_ACK) &&
11232 (SEQ_LEQ(th->th_ack, tp->iss) ||
11233 SEQ_GT(th->th_ack, tp->snd_max))) {
11234 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
11235 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11238 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
11239 TCP_PROBE5(connect__refused, NULL, tp,
11240 mtod(m, const char *), tp, th);
11241 tp = tcp_drop(tp, ECONNREFUSED);
11242 ctf_do_drop(m, tp);
11245 if (thflags & TH_RST) {
11246 ctf_do_drop(m, tp);
11249 if (!(thflags & TH_SYN)) {
11250 ctf_do_drop(m, tp);
11253 tp->irs = th->th_seq;
11254 tcp_rcvseqinit(tp);
11255 rack = (struct tcp_rack *)tp->t_fb_ptr;
11256 if (thflags & TH_ACK) {
11257 int tfo_partial = 0;
11259 KMOD_TCPSTAT_INC(tcps_connects);
11262 mac_socketpeer_set_from_mbuf(m, so);
11264 /* Do window scaling on this connection? */
11265 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
11266 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
11267 tp->rcv_scale = tp->request_r_scale;
11269 tp->rcv_adv += min(tp->rcv_wnd,
11270 TCP_MAXWIN << tp->rcv_scale);
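/*
 * The advertisement is capped at TCP_MAXWIN << rcv_scale, the
 * largest window that fits in the 16-bit window field once the
 * negotiated scale factor is applied.
 */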
11272 * If not all the data that was sent in the TFO SYN
11273 * has been acked, resend the remainder right away.
11275 if (IS_FASTOPEN(tp->t_flags) &&
11276 (tp->snd_una != tp->snd_max)) {
11277 tp->snd_nxt = th->th_ack;
11278 tfo_partial = 1;
11281 * If there's data, delay ACK; if there's also a FIN ACKNOW
11282 * will be turned on later.
11284 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) {
11285 rack_timer_cancel(tp, rack,
11286 rack->r_ctl.rc_rcvtime, __LINE__);
11287 tp->t_flags |= TF_DELACK;
11289 rack->r_wanted_output = 1;
11290 tp->t_flags |= TF_ACKNOW;
11291 rack->rc_dack_toggle = 0;
11294 tcp_ecn_input_syn_sent(tp, thflags, iptos);
11296 if (SEQ_GT(th->th_ack, tp->snd_una)) {
11298 * We advance snd_una for the
11299 * fast open case. If th_ack is
11300 * acknowledging data beyond
11301 * snd_una we can't just call
11302 * ack-processing since the
11303 * data stream in our send-map
11304 * will start at snd_una + 1 (one
11305 * beyond the SYN). If it's just
11306 * equal we don't need to do that
11307 * and there is no send_map.
11312 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions:
11313 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1
11315 tp->t_starttime = ticks;
11316 if (tp->t_flags & TF_NEEDFIN) {
11317 tcp_state_change(tp, TCPS_FIN_WAIT_1);
11318 tp->t_flags &= ~TF_NEEDFIN;
11319 thflags &= ~TH_SYN;
11321 tcp_state_change(tp, TCPS_ESTABLISHED);
11322 TCP_PROBE5(connect__established, NULL, tp,
11323 mtod(m, const char *), tp, th);
11324 rack_cc_conn_init(tp);
11328 * Received initial SYN in SYN-SENT[*] state => simultaneous
11329 * open. If segment contains CC option and there is a
11330 * cached CC, apply TAO test. If it succeeds, connection is *
11331 * half-synchronized. Otherwise, do 3-way handshake:
11332 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If
11333 * there was no CC option, clear cached CC value.
11335 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN);
11336 tcp_state_change(tp, TCPS_SYN_RECEIVED);
11338 INP_WLOCK_ASSERT(tp->t_inpcb);
11340 * Advance th->th_seq to correspond to first data byte. If data,
11341 * trim to stay within window, dropping FIN if necessary.
11344 if (tlen > tp->rcv_wnd) {
11345 todrop = tlen - tp->rcv_wnd;
11346 m_adj(m, -todrop);
11347 tlen = tp->rcv_wnd;
11348 thflags &= ~TH_FIN;
11349 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin);
11350 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
11352 tp->snd_wl1 = th->th_seq - 1;
11353 tp->rcv_up = th->th_seq;
11355 * Client side of transaction: already sent SYN and data. If the
11356 * remote host used T/TCP to validate the SYN, our data will be
11357 * ACK'd; if so, enter normal data segment processing in the middle
11358 * of step 5, ack processing. Otherwise, goto step 6.
11360 if (thflags & TH_ACK) {
11361 /* For syn-sent we need to possibly update the rtt */
11362 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
11365 mcts = tcp_ts_getticks();
11366 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
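/*
 * Both the echoed timestamp and tcp_ts_getticks() are in millisecond
 * ticks, so the difference is scaled by HPTS_USEC_IN_MSEC to get an
 * RTT sample in the microseconds rack's timers use.
 */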
11367 if (!tp->t_rttlow || tp->t_rttlow > t)
11368 tp->t_rttlow = t;
11369 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4);
11370 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2);
11371 tcp_rack_xmit_timer_commit(rack, tp);
11373 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val))
11375 /* We may have changed to FIN_WAIT_1 above */
11376 if (tp->t_state == TCPS_FIN_WAIT_1) {
11378 * In FIN_WAIT_1 STATE in addition to the processing
11379 * for the ESTABLISHED state if our FIN is now
11380 * acknowledged then enter FIN_WAIT_2.
11382 if (ourfinisacked) {
11384 * If we can't receive any more data, then
11385 * closing user can proceed. Starting the
11386 * timer is contrary to the specification,
11387 * but if we don't get a FIN we'll hang forever.
11390 * XXXjl: we should release the tp also, and
11391 * use a compressed state.
11393 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
11394 soisdisconnected(so);
11395 tcp_timer_activate(tp, TT_2MSL,
11396 (tcp_fast_finwait2_recycle ?
11397 tcp_finwait2_timeout :
11400 tcp_state_change(tp, TCPS_FIN_WAIT_2);
11404 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11405 tiwin, thflags, nxt_pkt));
11409 * Return value of 1, the TCB is unlocked and most
11410 * likely gone, return value of 0, the TCP is still locked.
11414 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
11415 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11416 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11418 struct tcp_rack *rack;
11419 int32_t ret_val = 0;
11420 int32_t ourfinisacked = 0;
11422 ctf_calc_rwin(so, tp);
11423 if ((thflags & TH_ACK) &&
11424 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
11425 SEQ_GT(th->th_ack, tp->snd_max))) {
11426 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
11427 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11430 rack = (struct tcp_rack *)tp->t_fb_ptr;
11431 if (IS_FASTOPEN(tp->t_flags)) {
11433 * When a TFO connection is in SYN_RECEIVED, the
11434 * only valid packets are the initial SYN, a
11435 * retransmit/copy of the initial SYN (possibly with
11436 * a subset of the original data), a valid ACK, a
11439 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
11440 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
11441 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11443 } else if (thflags & TH_SYN) {
11444 /* non-initial SYN is ignored */
11445 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) ||
11446 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) ||
11447 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) {
11448 ctf_do_drop(m, NULL);
11451 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) {
11452 ctf_do_drop(m, NULL);
11457 if ((thflags & TH_RST) ||
11458 (tp->t_fin_is_rst && (thflags & TH_FIN)))
11459 return (__ctf_process_rst(m, th, so, tp,
11460 &rack->r_ctl.challenge_ack_ts,
11461 &rack->r_ctl.challenge_ack_cnt));
11463 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11464 * it's less than ts_recent, drop it.
11466 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11467 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11468 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11472 * In the SYN-RECEIVED state, validate that the packet belongs to
11473 * this connection before trimming the data to fit the receive
11474 * window. Check the sequence number versus IRS since we know the
11475 * sequence numbers haven't wrapped. This is a partial fix for the
11476 * "LAND" DoS attack.
11478 if (SEQ_LT(th->th_seq, tp->irs)) {
11479 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
11480 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11483 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11484 &rack->r_ctl.challenge_ack_ts,
11485 &rack->r_ctl.challenge_ack_cnt)) {
11489 * If last ACK falls within this segment's sequence numbers, record
11490 * its timestamp. NOTE: 1) That the test incorporates suggestions
11491 * from the latest proposal of the tcplw@cray.com list (Braden
11492 * 1993/04/26). 2) That updating only on newer timestamps interferes
11493 * with our earlier PAWS tests, so this check should be solely
11494 * predicated on the sequence space of this segment. 3) That we
11495 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11496 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11497 * SEG.Len, This modified check allows us to overcome RFC1323's
11498 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11499 * p.869. In such cases, we can still calculate the RTT correctly
11500 * when RCV.NXT == Last.ACK.Sent.
11502 if ((to->to_flags & TOF_TS) != 0 &&
11503 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11504 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11505 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11506 tp->ts_recent_age = tcp_ts_getticks();
11507 tp->ts_recent = to->to_tsval;
11509 tp->snd_wnd = tiwin;
11510 rack_validate_fo_sendwin_up(tp, rack);
11512 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11513 * is on (half-synchronized state), then queue data for later
11514 * processing; else drop segment and return.
11516 if ((thflags & TH_ACK) == 0) {
11517 if (IS_FASTOPEN(tp->t_flags)) {
11518 rack_cc_conn_init(tp);
11520 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11521 tiwin, thflags, nxt_pkt));
11523 KMOD_TCPSTAT_INC(tcps_connects);
11524 if (tp->t_flags & TF_SONOTCONN) {
11525 tp->t_flags &= ~TF_SONOTCONN;
11528 /* Do window scaling? */
11529 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
11530 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
11531 tp->rcv_scale = tp->request_r_scale;
11534 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* ->
11537 tp->t_starttime = ticks;
11538 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) {
11539 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
11540 tp->t_tfo_pending = NULL;
11542 if (tp->t_flags & TF_NEEDFIN) {
11543 tcp_state_change(tp, TCPS_FIN_WAIT_1);
11544 tp->t_flags &= ~TF_NEEDFIN;
11546 tcp_state_change(tp, TCPS_ESTABLISHED);
11547 TCP_PROBE5(accept__established, NULL, tp,
11548 mtod(m, const char *), tp, th);
11550 * TFO connections call cc_conn_init() during SYN
11551 * processing. Calling it again here for such connections
11552 * is not harmless as it would undo the snd_cwnd reduction
11553 * that occurs when a TFO SYN|ACK is retransmitted.
11555 if (!IS_FASTOPEN(tp->t_flags))
11556 rack_cc_conn_init(tp);
11559 * Account for the ACK of our SYN prior to
11560 * regular ACK processing below, except for
11561 * simultaneous SYN, which is handled later.
11563 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN))
11566 * If segment contains data or ACK, will call tcp_reass() later; if
11567 * not, do so now to pass queued data to user.
11569 if (tlen == 0 && (thflags & TH_FIN) == 0) {
11570 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0,
11572 if (tp->t_flags & TF_WAKESOR) {
11573 tp->t_flags &= ~TF_WAKESOR;
11574 /* NB: sorwakeup_locked() does an implicit unlock. */
11575 sorwakeup_locked(so);
11578 tp->snd_wl1 = th->th_seq - 1;
11579 /* For syn-recv we need to possibly update the rtt */
11580 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
11583 mcts = tcp_ts_getticks();
11584 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
11585 if (!tp->t_rttlow || tp->t_rttlow > t)
11586 tp->t_rttlow = t;
11587 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5);
11588 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2);
11589 tcp_rack_xmit_timer_commit(rack, tp);
11591 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
11594 if (tp->t_state == TCPS_FIN_WAIT_1) {
11595 /* We could have gone to FIN_WAIT_1 (or EST) above */
11597 * In FIN_WAIT_1 STATE in addition to the processing for the
11598 * ESTABLISHED state if our FIN is now acknowledged then
11599 * enter FIN_WAIT_2.
11601 if (ourfinisacked) {
11603 * If we can't receive any more data, then closing
11604 * user can proceed. Starting the timer is contrary
11605 * to the specification, but if we don't get a FIN
11606 * we'll hang forever.
11608 * XXXjl: we should release the tp also, and use a
11609 * compressed state.
11611 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
11612 soisdisconnected(so);
11613 tcp_timer_activate(tp, TT_2MSL,
11614 (tcp_fast_finwait2_recycle ?
11615 tcp_finwait2_timeout :
11618 tcp_state_change(tp, TCPS_FIN_WAIT_2);
11621 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11622 tiwin, thflags, nxt_pkt));
11626 * Return value of 1, the TCB is unlocked and most
11627 * likely gone, return value of 0, the TCP is still locked.
11631 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
11632 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11633 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11635 int32_t ret_val = 0;
11636 struct tcp_rack *rack;
11639 * Header prediction: check for the two common cases of a
11640 * uni-directional data xfer. If the packet has no control flags,
11641 * is in-sequence, the window didn't change and we're not
11642 * retransmitting, it's a candidate. If the length is zero and the
11643 * ack moved forward, we're the sender side of the xfer. Just free
11644 * the data acked & wake any higher level process that was blocked
11645 * waiting for space. If the length is non-zero and the ack didn't
11646 * move, we're the receiver side. If we're getting packets in-order
11647 * (the reassembly queue is empty), add the data to the socket
11648 * buffer and note that we need a delayed ack. Make sure that the
11649 * hidden state-flags are also off. Since we check for
11650 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN.
11652 rack = (struct tcp_rack *)tp->t_fb_ptr;
11653 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) &&
11654 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) &&
11655 __predict_true(SEGQ_EMPTY(tp)) &&
11656 __predict_true(th->th_seq == tp->rcv_nxt)) {
11658 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen,
11659 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) {
11663 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen,
11664 tiwin, nxt_pkt, iptos)) {
11669 ctf_calc_rwin(so, tp);
11671 if ((thflags & TH_RST) ||
11672 (tp->t_fin_is_rst && (thflags & TH_FIN)))
11673 return (__ctf_process_rst(m, th, so, tp,
11674 &rack->r_ctl.challenge_ack_ts,
11675 &rack->r_ctl.challenge_ack_cnt));
11678 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11679 * synchronized state.
11681 if (thflags & TH_SYN) {
11682 ctf_challenge_ack(m, th, tp, &ret_val);
11686 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11687 * it's less than ts_recent, drop it.
11689 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11690 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11691 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11694 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11695 &rack->r_ctl.challenge_ack_ts,
11696 &rack->r_ctl.challenge_ack_cnt)) {
11700 * If last ACK falls within this segment's sequence numbers, record
11701 * its timestamp. NOTE: 1) That the test incorporates suggestions
11702 * from the latest proposal of the tcplw@cray.com list (Braden
11703 * 1993/04/26). 2) That updating only on newer timestamps interferes
11704 * with our earlier PAWS tests, so this check should be solely
11705 * predicated on the sequence space of this segment. 3) That we
11706 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11707 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11708 * SEG.Len, This modified check allows us to overcome RFC1323's
11709 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11710 * p.869. In such cases, we can still calculate the RTT correctly
11711 * when RCV.NXT == Last.ACK.Sent.
11713 if ((to->to_flags & TOF_TS) != 0 &&
11714 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11715 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11716 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11717 tp->ts_recent_age = tcp_ts_getticks();
11718 tp->ts_recent = to->to_tsval;
11721 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11722 * is on (half-synchronized state), then queue data for later
11723 * processing; else drop segment and return.
11725 if ((thflags & TH_ACK) == 0) {
11726 if (tp->t_flags & TF_NEEDSYN) {
11727 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11728 tiwin, thflags, nxt_pkt));
11730 } else if (tp->t_flags & TF_ACKNOW) {
11731 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11732 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11735 ctf_do_drop(m, NULL);
11742 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
11745 if (sbavail(&so->so_snd)) {
11746 if (ctf_progress_timeout_check(tp, true)) {
11747 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
11748 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11752 /* State changes only happen in rack_process_data() */
11753 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11754 tiwin, thflags, nxt_pkt));
11758 * Return value of 1, the TCB is unlocked and most
11759 * likely gone, return value of 0, the TCP is still locked.
11763 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
11764 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11765 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11767 int32_t ret_val = 0;
11768 struct tcp_rack *rack;
11770 rack = (struct tcp_rack *)tp->t_fb_ptr;
11771 ctf_calc_rwin(so, tp);
11772 if ((thflags & TH_RST) ||
11773 (tp->t_fin_is_rst && (thflags & TH_FIN)))
11774 return (__ctf_process_rst(m, th, so, tp,
11775 &rack->r_ctl.challenge_ack_ts,
11776 &rack->r_ctl.challenge_ack_cnt));
11778 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11779 * synchronized state.
11781 if (thflags & TH_SYN) {
11782 ctf_challenge_ack(m, th, tp, &ret_val);
11786 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11787 * it's less than ts_recent, drop it.
11789 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11790 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11791 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11794 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11795 &rack->r_ctl.challenge_ack_ts,
11796 &rack->r_ctl.challenge_ack_cnt)) {
11800 * If last ACK falls within this segment's sequence numbers, record
11801 * its timestamp. NOTE: 1) That the test incorporates suggestions
11802 * from the latest proposal of the tcplw@cray.com list (Braden
11803 * 1993/04/26). 2) That updating only on newer timestamps interferes
11804 * with our earlier PAWS tests, so this check should be solely
11805 * predicated on the sequence space of this segment. 3) That we
11806 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11807 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11808 * SEG.Len, This modified check allows us to overcome RFC1323's
11809 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11810 * p.869. In such cases, we can still calculate the RTT correctly
11811 * when RCV.NXT == Last.ACK.Sent.
11813 if ((to->to_flags & TOF_TS) != 0 &&
11814 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11815 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11816 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11817 tp->ts_recent_age = tcp_ts_getticks();
11818 tp->ts_recent = to->to_tsval;
11821 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11822 * is on (half-synchronized state), then queue data for later
11823 * processing; else drop segment and return.
11825 if ((thflags & TH_ACK) == 0) {
11826 if (tp->t_flags & TF_NEEDSYN) {
11827 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11828 tiwin, thflags, nxt_pkt));
11830 } else if (tp->t_flags & TF_ACKNOW) {
11831 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11832 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11835 ctf_do_drop(m, NULL);
11842 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
11845 if (sbavail(&so->so_snd)) {
11846 if (ctf_progress_timeout_check(tp, true)) {
11847 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11848 tp, tick, PROGRESS_DROP, __LINE__);
11849 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11853 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11854 tiwin, thflags, nxt_pkt));
11858 rack_check_data_after_close(struct mbuf *m,
11859 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so)
11861 struct tcp_rack *rack;
11863 rack = (struct tcp_rack *)tp->t_fb_ptr;
11864 if (rack->rc_allow_data_af_clo == 0) {
11866 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
11867 /* tcp_close will kill the inp pre-log the Reset */
11868 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
11869 tp = tcp_close(tp);
11870 KMOD_TCPSTAT_INC(tcps_rcvafterclose);
11871 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen));
11874 if (sbavail(&so->so_snd) == 0)
11876 /* Ok we allow data that is ignored and a followup reset */
11877 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
11878 tp->rcv_nxt = th->th_seq + *tlen;
11879 tp->t_flags2 |= TF2_DROP_AF_DATA;
11880 rack->r_wanted_output = 1;
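/*
 * When data after a close is tolerated (rc_allow_data_af_clo != 0)
 * the bytes are counted as received (rcv_nxt is advanced) but will
 * be thrown away; TF2_DROP_AF_DATA together with r_wanted_output
 * arranges for the follow-up reset mentioned above.
 */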
11886 * Return value of 1, the TCB is unlocked and most
11887 * likely gone, return value of 0, the TCP is still locked.
11891 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
11892 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11893 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11895 int32_t ret_val = 0;
11896 int32_t ourfinisacked = 0;
11897 struct tcp_rack *rack;
11899 rack = (struct tcp_rack *)tp->t_fb_ptr;
11900 ctf_calc_rwin(so, tp);
11902 if ((thflags & TH_RST) ||
11903 (tp->t_fin_is_rst && (thflags & TH_FIN)))
11904 return (__ctf_process_rst(m, th, so, tp,
11905 &rack->r_ctl.challenge_ack_ts,
11906 &rack->r_ctl.challenge_ack_cnt));
11908 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11909 * synchronized state.
11911 if (thflags & TH_SYN) {
11912 ctf_challenge_ack(m, th, tp, &ret_val);
11916 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11917 * it's less than ts_recent, drop it.
11919 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11920 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11921 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11924 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11925 &rack->r_ctl.challenge_ack_ts,
11926 &rack->r_ctl.challenge_ack_cnt)) {
11930 * If new data are received on a connection after the user processes
11931 * are gone, then RST the other end.
11933 if ((tp->t_flags & TF_CLOSED) && tlen &&
11934 rack_check_data_after_close(m, tp, &tlen, th, so))
11937 * If last ACK falls within this segment's sequence numbers, record
11938 * its timestamp. NOTE: 1) That the test incorporates suggestions
11939 * from the latest proposal of the tcplw@cray.com list (Braden
11940 * 1993/04/26). 2) That updating only on newer timestamps interferes
11941 * with our earlier PAWS tests, so this check should be solely
11942 * predicated on the sequence space of this segment. 3) That we
11943 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11944 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11945 * SEG.Len, This modified check allows us to overcome RFC1323's
11946 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11947 * p.869. In such cases, we can still calculate the RTT correctly
11948 * when RCV.NXT == Last.ACK.Sent.
11950 if ((to->to_flags & TOF_TS) != 0 &&
11951 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11952 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11953 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11954 tp->ts_recent_age = tcp_ts_getticks();
11955 tp->ts_recent = to->to_tsval;
11958 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11959 * is on (half-synchronized state), then queue data for later
11960 * processing; else drop segment and return.
11962 if ((thflags & TH_ACK) == 0) {
11963 if (tp->t_flags & TF_NEEDSYN) {
11964 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11965 tiwin, thflags, nxt_pkt));
11966 } else if (tp->t_flags & TF_ACKNOW) {
11967 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11968 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11971 ctf_do_drop(m, NULL);
11978 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
11981 if (ourfinisacked) {
11983 * If we can't receive any more data, then closing user can
11984 * proceed. Starting the timer is contrary to the
11985 * specification, but if we don't get a FIN we'll hang forever.
11988 * XXXjl: we should release the tp also, and use a
11989 * compressed state.
11991 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
11992 soisdisconnected(so);
11993 tcp_timer_activate(tp, TT_2MSL,
11994 (tcp_fast_finwait2_recycle ?
11995 tcp_finwait2_timeout :
11998 tcp_state_change(tp, TCPS_FIN_WAIT_2);
12000 if (sbavail(&so->so_snd)) {
12001 if (ctf_progress_timeout_check(tp, true)) {
12002 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
12003 tp, tick, PROGRESS_DROP, __LINE__);
12004 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
12008 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
12009 tiwin, thflags, nxt_pkt));
12013 * Return value of 1, the TCB is unlocked and most
12014 * likely gone, return value of 0, the TCP is still locked.
12018 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
12019 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
12020 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
12022 int32_t ret_val = 0;
12023 int32_t ourfinisacked = 0;
12024 struct tcp_rack *rack;
12026 rack = (struct tcp_rack *)tp->t_fb_ptr;
12027 ctf_calc_rwin(so, tp);
12029 if ((thflags & TH_RST) ||
12030 (tp->t_fin_is_rst && (thflags & TH_FIN)))
12031 return (__ctf_process_rst(m, th, so, tp,
12032 &rack->r_ctl.challenge_ack_ts,
12033 &rack->r_ctl.challenge_ack_cnt));
12035 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
12036 * synchronized state.
12038 if (thflags & TH_SYN) {
12039 ctf_challenge_ack(m, th, tp, &ret_val);
12043 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
12044 * it's less than ts_recent, drop it.
12046 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
12047 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
12048 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
12051 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
12052 &rack->r_ctl.challenge_ack_ts,
12053 &rack->r_ctl.challenge_ack_cnt)) {
12057 * If new data are received on a connection after the user processes
12058 * are gone, then RST the other end.
12060 if ((tp->t_flags & TF_CLOSED) && tlen &&
12061 rack_check_data_after_close(m, tp, &tlen, th, so))
12064 * If last ACK falls within this segment's sequence numbers, record
12065 * its timestamp. NOTE: 1) That the test incorporates suggestions
12066 * from the latest proposal of the tcplw@cray.com list (Braden
12067 * 1993/04/26). 2) That updating only on newer timestamps interferes
12068 * with our earlier PAWS tests, so this check should be solely
12069 * predicated on the sequence space of this segment. 3) That we
12070 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
12071 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
12072 * SEG.Len, This modified check allows us to overcome RFC1323's
12073 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
12074 * p.869. In such cases, we can still calculate the RTT correctly
12075 * when RCV.NXT == Last.ACK.Sent.
12077 if ((to->to_flags & TOF_TS) != 0 &&
12078 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
12079 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
12080 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
12081 tp->ts_recent_age = tcp_ts_getticks();
12082 tp->ts_recent = to->to_tsval;
12085 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
12086 * is on (half-synchronized state), then queue data for later
12087 * processing; else drop segment and return.
12089 if ((thflags & TH_ACK) == 0) {
12090 if (tp->t_flags & TF_NEEDSYN) {
12091 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
12092 tiwin, thflags, nxt_pkt));
12093 } else if (tp->t_flags & TF_ACKNOW) {
12094 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
12095 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
12098 ctf_do_drop(m, NULL);
12105 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
12108 if (ourfinisacked) {
12113 if (sbavail(&so->so_snd)) {
12114 if (ctf_progress_timeout_check(tp, true)) {
12115 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
12116 tp, tick, PROGRESS_DROP, __LINE__);
12117 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
12121 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
12122 tiwin, thflags, nxt_pkt));
12126 * Return value of 1, the TCB is unlocked and most
12127 * likely gone, return value of 0, the TCP is still locked.
12131 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
12132 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
12133 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
12135 int32_t ret_val = 0;
12136 int32_t ourfinisacked = 0;
12137 struct tcp_rack *rack;
12139 rack = (struct tcp_rack *)tp->t_fb_ptr;
12140 ctf_calc_rwin(so, tp);
12142 if ((thflags & TH_RST) ||
12143 (tp->t_fin_is_rst && (thflags & TH_FIN)))
12144 return (__ctf_process_rst(m, th, so, tp,
12145 &rack->r_ctl.challenge_ack_ts,
12146 &rack->r_ctl.challenge_ack_cnt));
12148 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
12149 * synchronized state.
12151 if (thflags & TH_SYN) {
12152 ctf_challenge_ack(m, th, tp, &ret_val);
12156 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
12157 * it's less than ts_recent, drop it.
12159 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
12160 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
12161 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
12164 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
12165 &rack->r_ctl.challenge_ack_ts,
12166 &rack->r_ctl.challenge_ack_cnt)) {
12170 * If new data are received on a connection after the user processes
12171 * are gone, then RST the other end.
12173 if ((tp->t_flags & TF_CLOSED) && tlen &&
12174 rack_check_data_after_close(m, tp, &tlen, th, so))
12177 * If last ACK falls within this segment's sequence numbers, record
12178 * its timestamp. NOTE: 1) That the test incorporates suggestions
12179 * from the latest proposal of the tcplw@cray.com list (Braden
12180 * 1993/04/26). 2) That updating only on newer timestamps interferes
12181 * with our earlier PAWS tests, so this check should be solely
12182 * predicated on the sequence space of this segment. 3) That we
12183 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
12184 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
12185 * SEG.Len, This modified check allows us to overcome RFC1323's
12186 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
12187 * p.869. In such cases, we can still calculate the RTT correctly
12188 * when RCV.NXT == Last.ACK.Sent.
12190 if ((to->to_flags & TOF_TS) != 0 &&
12191 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
12192 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
12193 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
12194 tp->ts_recent_age = tcp_ts_getticks();
12195 tp->ts_recent = to->to_tsval;
12198 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
12199 * is on (half-synchronized state), then queue data for later
12200 * processing; else drop segment and return.
12202 if ((thflags & TH_ACK) == 0) {
12203 if (tp->t_flags & TF_NEEDSYN) {
12204 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
12205 tiwin, thflags, nxt_pkt));
12206 } else if (tp->t_flags & TF_ACKNOW) {
12207 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
12208 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
12211 ctf_do_drop(m, NULL);
12216 * case TCPS_LAST_ACK: Ack processing.
12218 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
12221 if (ourfinisacked) {
12222 tp = tcp_close(tp);
12223 ctf_do_drop(m, tp);
12226 if (sbavail(&so->so_snd)) {
12227 if (ctf_progress_timeout_check(tp, true)) {
12228 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
12229 tp, tick, PROGRESS_DROP, __LINE__);
12230 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
12234 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
12235 tiwin, thflags, nxt_pkt));
12239 * Return value of 1, the TCB is unlocked and most
12240 * likely gone, return value of 0, the TCP is still locked.
12244 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
12245 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
12246 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
12248 int32_t ret_val = 0;
12249 int32_t ourfinisacked = 0;
12250 struct tcp_rack *rack;
12252 rack = (struct tcp_rack *)tp->t_fb_ptr;
12253 ctf_calc_rwin(so, tp);
12255 /* Reset receive buffer auto scaling when not in bulk receive mode. */
12256 if ((thflags & TH_RST) ||
12257 (tp->t_fin_is_rst && (thflags & TH_FIN)))
12258 return (__ctf_process_rst(m, th, so, tp,
12259 &rack->r_ctl.challenge_ack_ts,
12260 &rack->r_ctl.challenge_ack_cnt));
12262 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
12263 * synchronized state.
12265 if (thflags & TH_SYN) {
12266 ctf_challenge_ack(m, th, tp, &ret_val);
12270 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
12271 * it's less than ts_recent, drop it.
12273 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
12274 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
12275 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
12278 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
12279 &rack->r_ctl.challenge_ack_ts,
12280 &rack->r_ctl.challenge_ack_cnt)) {
12284 * If new data are received on a connection after the user processes
12285 * are gone, then RST the other end.
12287 if ((tp->t_flags & TF_CLOSED) && tlen &&
12288 rack_check_data_after_close(m, tp, &tlen, th, so))
12291 * If last ACK falls within this segment's sequence numbers, record
12292 * its timestamp. NOTE: 1) That the test incorporates suggestions
12293 * from the latest proposal of the tcplw@cray.com list (Braden
12294 * 1993/04/26). 2) That updating only on newer timestamps interferes
12295 * with our earlier PAWS tests, so this check should be solely
12296 * predicated on the sequence space of this segment. 3) That we
12297 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
12298 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
12299 * SEG.Len, This modified check allows us to overcome RFC1323's
12300 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
12301 * p.869. In such cases, we can still calculate the RTT correctly
12302 * when RCV.NXT == Last.ACK.Sent.
12304 if ((to->to_flags & TOF_TS) != 0 &&
12305 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
12306 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
12307 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
12308 tp->ts_recent_age = tcp_ts_getticks();
12309 tp->ts_recent = to->to_tsval;
12312 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
12313 * is on (half-synchronized state), then queue data for later
12314 * processing; else drop segment and return.
12316 if ((thflags & TH_ACK) == 0) {
12317 if (tp->t_flags & TF_NEEDSYN) {
12318 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
12319 tiwin, thflags, nxt_pkt));
12320 } else if (tp->t_flags & TF_ACKNOW) {
12321 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
12322 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
12325 ctf_do_drop(m, NULL);
12332 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
12335 if (sbavail(&so->so_snd)) {
12336 if (ctf_progress_timeout_check(tp, true)) {
12337 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
12338 tp, tick, PROGRESS_DROP, __LINE__);
12339 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
12343 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
12344 tiwin, thflags, nxt_pkt));
12348 rack_clear_rate_sample(struct tcp_rack *rack)
12350 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY;
12351 rack->r_ctl.rack_rs.rs_rtt_cnt = 0;
12352 rack->r_ctl.rack_rs.rs_rtt_tot = 0;
12356 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override)
12358 uint64_t bw_est, rate_wanted;
12360 uint32_t user_max, orig_min, orig_max;
12362 orig_min = rack->r_ctl.rc_pace_min_segs;
12363 orig_max = rack->r_ctl.rc_pace_max_segs;
12364 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs;
12365 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs)
12367 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp);
12368 if (rack->use_fixed_rate || rack->rc_force_max_seg) {
12369 if (user_max != rack->r_ctl.rc_pace_max_segs)
12372 if (rack->rc_force_max_seg) {
12373 rack->r_ctl.rc_pace_max_segs = user_max;
12374 } else if (rack->use_fixed_rate) {
12375 bw_est = rack_get_bw(rack);
12376 if ((rack->r_ctl.crte == NULL) ||
12377 (bw_est != rack->r_ctl.crte->rate)) {
12378 rack->r_ctl.rc_pace_max_segs = user_max;
12380 /* We are pacing right at the hardware rate */
12383 segsiz = min(ctf_fixed_maxseg(tp),
12384 rack->r_ctl.rc_pace_min_segs);
12385 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(
12386 tp, bw_est, segsiz, 0,
12387 rack->r_ctl.crte, NULL);
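/*
 * We are pacing at an established hardware rate, so the burst size
 * (pace_max_segs) is derived from the bandwidth estimate and the
 * hardware rate entry rather than from the user's segment cap.
 */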
12389 } else if (rack->rc_always_pace) {
12390 if (rack->r_ctl.gp_bw ||
12391 #ifdef NETFLIX_PEAKRATE
12392 rack->rc_tp->t_maxpeakrate ||
12394 rack->r_ctl.init_rate) {
12395 /* We have a rate of some sort set */
12398 bw_est = rack_get_bw(rack);
12399 orig = rack->r_ctl.rc_pace_max_segs;
12401 rate_wanted = *fill_override;
12403 rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL);
12405 /* We have something */
12406 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack,
12408 ctf_fixed_maxseg(rack->rc_tp));
12410 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs;
12411 if (orig != rack->r_ctl.rc_pace_max_segs)
12413 } else if ((rack->r_ctl.gp_bw == 0) &&
12414 (rack->r_ctl.rc_pace_max_segs == 0)) {
12416 * If we have nothing limit us to bursting
12417 * out IW sized pieces.
12420 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack);
12423 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) {
12425 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES;
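/* Never let a single paced burst exceed PACE_MAX_IP_BYTES. */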
12428 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2);
12433 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack)
12436 struct ip6_hdr *ip6 = NULL;
12439 struct ip *ip = NULL;
12441 struct udphdr *udp = NULL;
12443 /* Ok lets fill in the fast block, it can only be used with no IP options! */
12445 if (rack->r_is_v6) {
12446 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
12447 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
12449 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
12450 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
12451 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
12452 udp->uh_dport = tp->t_port;
12453 rack->r_ctl.fsb.udp = udp;
12454 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
12457 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1);
12458 rack->r_ctl.fsb.udp = NULL;
12460 tcpip_fillheaders(rack->rc_inp,
12462 ip6, rack->r_ctl.fsb.th);
12466 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr);
12467 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
12469 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
12470 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
12471 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
12472 udp->uh_dport = tp->t_port;
12473 rack->r_ctl.fsb.udp = udp;
12474 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
12477 rack->r_ctl.fsb.udp = NULL;
12478 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1);
12480 tcpip_fillheaders(rack->rc_inp,
12482 ip, rack->r_ctl.fsb.th);
12484 rack->r_fsb_inited = 1;
12488 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack)
12491 * Allocate the larger of the two spaces: V6 if available, else just
12492 * V4, and include room for a udphdr (overbook).
12495 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr);
12497 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr);
12499 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len,
12500 M_TCPFSB, M_NOWAIT|M_ZERO);
12501 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) {
12504 rack->r_fsb_inited = 0;
12509 rack_init(struct tcpcb *tp)
12511 struct tcp_rack *rack = NULL;
12513 struct rack_sendmap *insret;
12515 uint32_t iwin, snt, us_cts;
12518 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT);
12519 if (tp->t_fb_ptr == NULL) {
12521 * We need to allocate memory but can't. The INP and INP_INFO
12522 * locks are held and they are recursive (this happens during setup),
12523 * so a scheme to drop the locks fails :(
12528 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack));
12530 rack = (struct tcp_rack *)tp->t_fb_ptr;
12531 RB_INIT(&rack->r_ctl.rc_mtree);
12532 TAILQ_INIT(&rack->r_ctl.rc_free);
12533 TAILQ_INIT(&rack->r_ctl.rc_tmap);
12535 rack->rc_inp = tp->t_inpcb;
12537 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
12538 /* Probably not needed but lets be sure */
12539 rack_clear_rate_sample(rack);
12541 * Save off the default values, socket options will poke
12542 * at these if pacing is not on or we have not yet
12543 * reached where pacing is on (gp_ready/fixed enabled).
12544 * When they get set into the CC module (when gp_ready
12545 * is enabled or we enable fixed) then we will set these
12546 * values into the CC and place in here the old values
12547 * so we have a restoral. Then we will set the flag
12548 * rc_pacing_cc_set. That way whenever we turn off pacing
12549 * or switch off this stack, we will know to go restore
12550 * the saved values.
12552 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn;
12553 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn;
12554 /* We want abe like behavior as well */
12555 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED;
12556 rack->r_ctl.rc_reorder_fade = rack_reorder_fade;
12557 rack->rc_allow_data_af_clo = rack_ignore_data_after_close;
12558 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh;
12559 rack->r_ctl.roundends = tp->snd_max;
12561 rack->use_rack_rr = 1;
12562 if (V_tcp_delack_enabled)
12563 tp->t_delayed_ack = 1;
12565 tp->t_delayed_ack = 0;
12566 #ifdef TCP_ACCOUNTING
12567 if (rack_tcp_accounting) {
12568 tp->t_flags2 |= TF2_TCP_ACCOUNTING;
12571 if (rack_enable_shared_cwnd)
12572 rack->rack_enable_scwnd = 1;
12573 rack->rc_user_set_max_segs = rack_hptsi_segments;
12574 rack->rc_force_max_seg = 0;
12575 if (rack_use_imac_dack)
12576 rack->rc_dack_mode = 1;
12577 TAILQ_INIT(&rack->r_ctl.opt_list);
12578 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh;
12579 rack->r_ctl.rc_pkt_delay = rack_pkt_delay;
12580 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp;
12581 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff;
12582 rack->r_ctl.rc_highest_us_rtt = 0;
12583 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap;
12584 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop);
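/*
 * Rack keeps its timers in microseconds, so the tick-based
 * tcp_rexmit_slop sysctl is converted once here.
 */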
12585 if (rack_use_cmp_acks)
12586 rack->r_use_cmp_ack = 1;
12587 if (rack_disable_prr)
12588 rack->rack_no_prr = 1;
12589 if (rack_gp_no_rec_chg)
12590 rack->rc_gp_no_rec_chg = 1;
12591 if (rack_pace_every_seg && tcp_can_enable_pacing()) {
12592 rack->rc_always_pace = 1;
12593 if (rack->use_fixed_rate || rack->gp_ready)
12594 rack_set_cc_pacing(rack);
12596 rack->rc_always_pace = 0;
12597 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack)
12598 rack->r_mbuf_queue = 1;
12600 rack->r_mbuf_queue = 0;
12601 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
12602 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
12604 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
12605 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12606 if (rack_limits_scwnd)
12607 rack->r_limit_scw = 1;
12609 rack->r_limit_scw = 0;
12610 rack->rc_labc = V_tcp_abc_l_var;
12611 rack->r_ctl.rc_high_rwnd = tp->snd_wnd;
12612 rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
12613 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method;
12614 rack->rack_tlp_threshold_use = rack_tlp_threshold_use;
12615 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr;
12616 rack->r_ctl.rc_min_to = rack_min_to;
12617 microuptime(&rack->r_ctl.act_rcv_time);
12618 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
12619 rack->rc_init_win = rack_default_init_window;
12620 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss;
12621 if (rack_hw_up_only)
12622 rack->r_up_only = 1;
12623 if (rack_do_dyn_mul) {
12624 /* When dynamic adjustment is on CA needs to start at 100% */
12625 rack->rc_gp_dyn_mul = 1;
12626 if (rack_do_dyn_mul >= 100)
12627 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
12629 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
12630 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec;
12631 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
12632 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
12633 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN,
12634 rack_probertt_filter_life);
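/*
 * rc_gp_min_rtt is a windowed minimum filter: it tracks the smallest
 * RTT observed over roughly the last rack_probertt_filter_life
 * microseconds and is what the probe-RTT logic keys off of.
 */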
12635 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
12636 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
12637 rack->r_ctl.rc_time_of_last_probertt = us_cts;
12638 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks();
12639 rack->r_ctl.rc_time_probertt_starts = 0;
12640 if (rack_dsack_std_based & 0x1) {
12641 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */
12642 rack->rc_rack_tmr_std_based = 1;
12644 if (rack_dsack_std_based & 0x2) {
12645 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */
12646 rack->rc_rack_use_dsack = 1;
12648 /* We require at least one measurement, even if the sysctl is 0 */
12649 if (rack_req_measurements)
12650 rack->r_ctl.req_measurements = rack_req_measurements;
12652 rack->r_ctl.req_measurements = 1;
12653 if (rack_enable_hw_pacing)
12654 rack->rack_hdw_pace_ena = 1;
12655 if (rack_hw_rate_caps)
12656 rack->r_rack_hw_rate_caps = 1;
12657 /* Do we force on detection? */
12658 #ifdef NETFLIX_EXP_DETECTION
12659 if (tcp_force_detection)
12660 rack->do_detection = 1;
12663 rack->do_detection = 0;
12664 if (rack_non_rxt_use_cr)
12665 rack->rack_rec_nonrxt_use_cr = 1;
12666 err = rack_init_fsb(tp, rack);
12668 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
12669 tp->t_fb_ptr = NULL;
12672 if (tp->snd_una != tp->snd_max) {
12673 /* Create a send map for the current outstanding data */
12674 struct rack_sendmap *rsm;
12676 rsm = rack_alloc(rack);
12678 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
12679 tp->t_fb_ptr = NULL;
12682 rsm->r_no_rtt_allowed = 1;
12683 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
12684 rsm->r_rtr_cnt = 1;
12685 rsm->r_rtr_bytes = 0;
12686 if (tp->t_flags & TF_SENTFIN)
12687 rsm->r_flags |= RACK_HAS_FIN;
12688 if ((tp->snd_una == tp->iss) &&
12689 !TCPS_HAVEESTABLISHED(tp->t_state))
12690 rsm->r_flags |= RACK_HAS_SYN;
12691 rsm->r_start = tp->snd_una;
12692 rsm->r_end = tp->snd_max;
12694 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) {
12695 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff);
12697 rsm->orig_m_len = rsm->m->m_len;
12699 rsm->orig_m_len = 0;
12702 * This can happen if we have a stand-alone FIN or SYN.
12706 rsm->orig_m_len = 0;
12710 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
12712 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
12713 if (insret != NULL) {
12714 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p",
12715 insret, rack, rsm);
12718 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
12719 rsm->r_in_tmap = 1;
12722 * Timers in Rack are kept in microseconds so let's
12723 * convert any initial incoming variables
12724 * from ticks into usecs. Note that we
12725 * also change the values of t_srtt and t_rttvar, if
12726 * they are non-zero. They are kept with a 5
12727 * bit decimal so we have to carefully convert
12728 * these to get the full precision.
12730 rack_convert_rtts(tp);
12731 tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow);
12732 if (rack_do_hystart) {
12733 tp->ccv->flags |= CCF_HYSTART_ALLOWED;
12734 if (rack_do_hystart > 1)
12735 tp->ccv->flags |= CCF_HYSTART_CAN_SH_CWND;
12736 if (rack_do_hystart > 2)
12737 tp->ccv->flags |= CCF_HYSTART_CONS_SSTH;
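/*
 * rack_do_hystart acts as a level: 1 merely permits hystart++, while
 * higher settings additionally let it adjust the cwnd
 * (CCF_HYSTART_CAN_SH_CWND) and use the more conservative ssthresh
 * handling (CCF_HYSTART_CONS_SSTH).
 */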
12739 if (rack_def_profile)
12740 rack_set_profile(rack, rack_def_profile);
12741 /* Cancel the GP measurement in progress */
12742 tp->t_flags &= ~TF_GPUTINPROG;
12743 if (SEQ_GT(tp->snd_max, tp->iss))
12744 snt = tp->snd_max - tp->iss;
12747 iwin = rc_init_window(rack);
12749 /* We are not past the initial window
12750 * so we need to make sure cwnd is at least that large.
12753 if (tp->snd_cwnd < iwin)
12754 tp->snd_cwnd = iwin;
12756 * If we are within the initial window
12757 * we want ssthresh to be unlimited. Setting
12758 * it to the rwnd (which the default stack does
12759 * and older racks) is not really a good idea
12760 * since we want to be in SS and grow both the
12761 * cwnd and the rwnd (via dynamic rwnd growth). If
12762 * we set it to the rwnd then as the peer grows its
12763 * rwnd we will be stuck in CA and never hit SS.
12765 * Its far better to raise it up high (this takes the
12766 * risk that there as been a loss already, probably
12767 * we should have an indicator in all stacks of loss
12768 * but we don't), but considering the normal use this
12769 * is a risk worth taking. The consequences of not
12770 * hitting SS are far worse than going one more time
12771 * into it early on (before we have sent even an IW).
12772 * It is highly unlikely that we will have had a loss
12773 * before getting the IW out.
12775 tp->snd_ssthresh = 0xffffffff;
12777 rack_stop_all_timers(tp);
12778 /* Let's set up the fsb block */
12779 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
12780 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur,
12781 __LINE__, RACK_RTTS_INIT);
12786 rack_handoff_ok(struct tcpcb *tp)
12788 if ((tp->t_state == TCPS_CLOSED) ||
12789 (tp->t_state == TCPS_LISTEN)) {
12790 /* Sure no problem though it may not stick */
12793 if ((tp->t_state == TCPS_SYN_SENT) ||
12794 (tp->t_state == TCPS_SYN_RECEIVED)) {
12796 * We really don't know if you support SACK;
12797 * you have to get to ESTAB or beyond to tell.
12801 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) {
12803 * Rack will only send a FIN after all data is acknowledged.
12804 * So in this case we have more data outstanding. We can't
12805 * switch stacks until either all data and only the FIN
12806 * is left (in which case rack_init() now knows how
12807 * to deal with that) <or> all is acknowledged and we
12808 * are only left with incoming data, though why you
12809 * would want to switch to rack after all data is acknowledged
12810 * I have no idea (rrs)!
12814 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){
12818 * If we reach here we don't do SACK on this connection so we can
12826 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
12828 if (tp->t_fb_ptr) {
12829 struct tcp_rack *rack;
12830 struct rack_sendmap *rsm, *nrsm;
12832 struct rack_sendmap *rm;
12835 rack = (struct tcp_rack *)tp->t_fb_ptr;
12836 if (tp->t_in_pkt) {
12838 * It is unsafe to process the packets since a
12839 * reset may be lurking in them (it's rare but it
12840 * can occur). If we were to find a RST, then we
12841 * would end up dropping the connection and the
12842 * INP lock, so when we return the caller (tcp_usrreq)
12843 * will blow up when it tries to unlock the inp.
12845 struct mbuf *save, *m;
12848 tp->t_in_pkt = NULL;
12849 tp->t_tail_pkt = NULL;
12851 save = m->m_nextpkt;
12852 m->m_nextpkt = NULL;
12857 tp->t_flags &= ~TF_FORCEDATA;
12858 #ifdef NETFLIX_SHARED_CWND
12859 if (rack->r_ctl.rc_scw) {
12862 if (rack->r_limit_scw)
12863 limit = max(1, rack->r_ctl.rc_lowest_us_rtt);
12866 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw,
12867 rack->r_ctl.rc_scw_index,
12869 rack->r_ctl.rc_scw = NULL;
12872 if (rack->r_ctl.fsb.tcp_ip_hdr) {
12873 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB);
12874 rack->r_ctl.fsb.tcp_ip_hdr = NULL;
12875 rack->r_ctl.fsb.th = NULL;
12877 /* Convert back to ticks, preserving the fractional part */
12878 if (tp->t_srtt > 1) {
12879 uint32_t val, frac;
12881 val = USEC_2_TICKS(tp->t_srtt);
12882 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz);
12883 tp->t_srtt = val << TCP_RTT_SHIFT;
12885 * frac is the fractional part that is left
12886 * over from converting to hz and shifting.
12887 * We need to convert this to the 5 bit
12892 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC);
12894 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC);
12896 tp->t_srtt += frac;
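/*
 * Illustrative arithmetic for the hz == 1000 branch above (1000 usecs
 * per tick): with t_srtt == 25375 usecs, val == 25 ticks and
 * frac == 25375 % 1000 == 375, so t_srtt becomes 25 << 5 == 800 plus
 * (375 * TCP_RTT_SCALE) / HPTS_USEC_IN_MSEC == 12, i.e. 812, which is
 * 25.375 ticks in the 5-bit fixed point form the base stack expects.
 */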
12899 if (tp->t_rttvar) {
12900 uint32_t val, frac;
12902 val = USEC_2_TICKS(tp->t_rttvar);
12903 frac = tp->t_rttvar % (HPTS_USEC_IN_SEC / hz);
12904 tp->t_rttvar = val << TCP_RTTVAR_SHIFT;
12906 * frac is the fractional part that is left
12907 * over from converting to hz and shifting.
12908 * We need to convert this to the 5 bit
12913 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC);
12915 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC);
12917 tp->t_rttvar += frac;
12920 tp->t_rxtcur = USEC_2_TICKS(tp->t_rxtcur);
12921 tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow);
12922 if (rack->rc_always_pace) {
12923 tcp_decrement_paced_conn();
12924 rack_undo_cc_pacing(rack);
12925 rack->rc_always_pace = 0;
12927 /* Clean up any options if they were not applied */
12928 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) {
12929 struct deferred_opt_list *dol;
12931 dol = TAILQ_FIRST(&rack->r_ctl.opt_list);
12932 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
12933 free(dol, M_TCPDO);
12935 /* rack does not use force data but other stacks may clear it */
12936 if (rack->r_ctl.crte != NULL) {
12937 tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
12938 rack->rack_hdrw_pacing = 0;
12939 rack->r_ctl.crte = NULL;
12941 #ifdef TCP_BLACKBOX
12942 tcp_log_flowend(tp);
12944 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) {
12946 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
12948 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
12950 panic("At fini, rack:%p rsm:%p rm:%p",
12954 uma_zfree(rack_zone, rsm);
12956 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
12958 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
12959 uma_zfree(rack_zone, rsm);
12960 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
12962 rack->rc_free_cnt = 0;
12963 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
12964 tp->t_fb_ptr = NULL;
12967 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
12968 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
12969 tp->t_inpcb->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
12970 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_ACKCMP;
12971 /* Cancel the GP measurement in progress */
12972 tp->t_flags &= ~TF_GPUTINPROG;
12973 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_L_ACKS;
12975 /* Make sure snd_nxt is correctly set */
12976 tp->snd_nxt = tp->snd_max;
12980 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack)
12982 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) {
12983 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
12985 switch (tp->t_state) {
12986 case TCPS_SYN_SENT:
12987 rack->r_state = TCPS_SYN_SENT;
12988 rack->r_substate = rack_do_syn_sent;
12990 case TCPS_SYN_RECEIVED:
12991 rack->r_state = TCPS_SYN_RECEIVED;
12992 rack->r_substate = rack_do_syn_recv;
12994 case TCPS_ESTABLISHED:
12995 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12996 rack->r_state = TCPS_ESTABLISHED;
12997 rack->r_substate = rack_do_established;
12999 case TCPS_CLOSE_WAIT:
13000 rack_set_pace_segments(tp, rack, __LINE__, NULL);
13001 rack->r_state = TCPS_CLOSE_WAIT;
13002 rack->r_substate = rack_do_close_wait;
13004 case TCPS_FIN_WAIT_1:
13005 rack_set_pace_segments(tp, rack, __LINE__, NULL);
13006 rack->r_state = TCPS_FIN_WAIT_1;
13007 rack->r_substate = rack_do_fin_wait_1;
13010 rack_set_pace_segments(tp, rack, __LINE__, NULL);
13011 rack->r_state = TCPS_CLOSING;
13012 rack->r_substate = rack_do_closing;
13014 case TCPS_LAST_ACK:
13015 rack_set_pace_segments(tp, rack, __LINE__, NULL);
13016 rack->r_state = TCPS_LAST_ACK;
13017 rack->r_substate = rack_do_lastack;
13019 case TCPS_FIN_WAIT_2:
13020 rack_set_pace_segments(tp, rack, __LINE__, NULL);
13021 rack->r_state = TCPS_FIN_WAIT_2;
13022 rack->r_substate = rack_do_fin_wait_2;
13026 case TCPS_TIME_WAIT:
13030 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
13031 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
13036 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
13039 * We received an ack, and then did not
13040 * call send, or we were bounced out because the
13041 * hpts was running. Now a timer is up as well; is
13042 * it the right timer?
13044 struct rack_sendmap *rsm;
13047 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
13048 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT))
13050 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
13051 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) &&
13052 (tmr_up == PACE_TMR_RXT)) {
13053 /* Should be an RXT */
13057 /* Nothing outstanding? */
13058 if (tp->t_flags & TF_DELACK) {
13059 if (tmr_up == PACE_TMR_DELACK)
13060 /* We are supposed to have delayed ack up and we do */
13062 } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) {
13064 * if we hit enobufs then we would expect the possibility
13065 * of nothing outstanding and the RXT up (and the hptsi timer).
13068 } else if (((V_tcp_always_keepalive ||
13069 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
13070 (tp->t_state <= TCPS_CLOSING)) &&
13071 (tmr_up == PACE_TMR_KEEP) &&
13072 (tp->snd_max == tp->snd_una)) {
13073 /* We should have keep alive up and we do */
13077 if (SEQ_GT(tp->snd_max, tp->snd_una) &&
13078 ((tmr_up == PACE_TMR_TLP) ||
13079 (tmr_up == PACE_TMR_RACK) ||
13080 (tmr_up == PACE_TMR_RXT))) {
13082 * Either a Rack, TLP or RXT is fine if we
13083 * have outstanding data.
13086 } else if (tmr_up == PACE_TMR_DELACK) {
13088 * If the delayed ack was going to go off
13089 * before the rtx/tlp/rack timer were going to
13090 * expire, then that would be the timer in control.
13091 * Note we don't check the time here trusting the
13097 * Ok the timer originally started is not what we want now.
13098 * We will force the hpts to be stopped if any, and restart
13099 * with the slot set to what was in the saved slot.
13101 if (tcp_in_hpts(rack->rc_inp)) {
13102 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
13105 us_cts = tcp_get_usecs(NULL);
13106 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
13108 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
13110 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
13112 tcp_hpts_remove(tp->t_inpcb);
13114 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
13115 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
13120 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts, uint32_t high_seq)
13122 if ((SEQ_LT(tp->snd_wl1, seq) ||
13123 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) ||
13124 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) {
13125 /* keep track of pure window updates */
13126 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd))
13127 KMOD_TCPSTAT_INC(tcps_rcvwinupd);
13128 tp->snd_wnd = tiwin;
13129 rack_validate_fo_sendwin_up(tp, rack);
13132 if (tp->snd_wnd > tp->max_sndwnd)
13133 tp->max_sndwnd = tp->snd_wnd;
13134 rack->r_wanted_output = 1;
13135 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) {
13136 tp->snd_wnd = tiwin;
13137 rack_validate_fo_sendwin_up(tp, rack);
13141 /* Not a valid win update */
13144 /* Do we exit persists? */
13145 if ((rack->rc_in_persist != 0) &&
13146 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
13147 rack->r_ctl.rc_pace_min_segs))) {
13148 rack_exit_persist(tp, rack, cts);
13150 /* Do we enter persists? */
13151 if ((rack->rc_in_persist == 0) &&
13152 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
13153 TCPS_HAVEESTABLISHED(tp->t_state) &&
13154 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
13155 sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
13156 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
13158 * Here the rwnd is less than
13159 * the pacing size, we are established,
13160 * nothing is outstanding, and there is
13161 * data to send. Enter persists.
13163 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
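/*
 * Illustrative numbers: with rc_high_rwnd == 64000 and
 * rc_pace_min_segs == 1448 the threshold is min(32000, 1448) == 1448,
 * so a peer advertising a 1000 byte window while un-sent data is
 * queued would drop us into persists here.
 */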
13168 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq)
13171 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
13172 union tcp_log_stackspecific log;
13173 struct timeval ltv;
13174 char tcp_hdr_buf[60];
13176 struct timespec ts;
13177 uint32_t orig_snd_una;
13180 #ifdef NETFLIX_HTTP_LOGGING
13181 struct http_sendfile_track *http_req;
13183 if (SEQ_GT(ae->ack, tp->snd_una)) {
13184 http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1));
13186 http_req = tcp_http_find_req_for_seq(tp, ae->ack);
13189 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
13190 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
13191 if (rack->rack_no_prr == 0)
13192 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
13194 log.u_bbr.flex1 = 0;
13195 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
13196 log.u_bbr.use_lt_bw <<= 1;
13197 log.u_bbr.use_lt_bw |= rack->r_might_revert;
13198 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
13199 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
13200 log.u_bbr.pkts_out = tp->t_maxseg;
13201 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
13202 log.u_bbr.flex7 = 1;
13203 log.u_bbr.lost = ae->flags;
13204 log.u_bbr.cwnd_gain = ackval;
13205 log.u_bbr.pacing_gain = 0x2;
13206 if (ae->flags & TSTMP_HDWR) {
13207 /* Record the hardware timestamp if present */
13208 log.u_bbr.flex3 = M_TSTMP;
13209 ts.tv_sec = ae->timestamp / 1000000000;
13210 ts.tv_nsec = ae->timestamp % 1000000000;
13211 ltv.tv_sec = ts.tv_sec;
13212 ltv.tv_usec = ts.tv_nsec / 1000;
13213 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
13214 } else if (ae->flags & TSTMP_LRO) {
13215 /* Record the LRO arrival timestamp */
13216 log.u_bbr.flex3 = M_TSTMP_LRO;
13217 ts.tv_sec = ae->timestamp / 1000000000;
13218 ts.tv_nsec = ae->timestamp % 1000000000;
13219 ltv.tv_sec = ts.tv_sec;
13220 ltv.tv_usec = ts.tv_nsec / 1000;
13221 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
13223 log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
13224 /* Log the rcv time */
13225 log.u_bbr.delRate = ae->timestamp;
13226 #ifdef NETFLIX_HTTP_LOGGING
13227 log.u_bbr.applimited = tp->t_http_closed;
13228 log.u_bbr.applimited <<= 8;
13229 log.u_bbr.applimited |= tp->t_http_open;
13230 log.u_bbr.applimited <<= 8;
13231 log.u_bbr.applimited |= tp->t_http_req;
13233 /* Copy out any client req info */
13235 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
13237 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
13238 log.u_bbr.rttProp = http_req->timestamp;
13239 log.u_bbr.cur_del_rate = http_req->start;
13240 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
13241 log.u_bbr.flex8 |= 1;
13243 log.u_bbr.flex8 |= 2;
13244 log.u_bbr.bw_inuse = http_req->end;
13246 log.u_bbr.flex6 = http_req->start_seq;
13247 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
13248 log.u_bbr.flex8 |= 4;
13249 log.u_bbr.epoch = http_req->end_seq;
13253 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf));
13254 th = (struct tcphdr *)tcp_hdr_buf;
13255 th->th_seq = ae->seq;
13256 th->th_ack = ae->ack;
13257 th->th_win = ae->win;
13258 /* Now fill in the ports */
13259 th->th_sport = tp->t_inpcb->inp_fport;
13260 th->th_dport = tp->t_inpcb->inp_lport;
13261 tcp_set_flags(th, ae->flags);
13262 /* Now do we have a timestamp option? */
13263 if (ae->flags & HAS_TSTMP) {
13267 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2);
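/*
 * TCPOLEN_TSTAMP_APPA is 12 bytes (2 bytes of padding plus the 10 byte
 * timestamp option), so the reconstructed header is 20 + 12 == 32 bytes
 * and th_off works out to 32 >> 2 == 8 words.
 */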
13268 cp = (u_char *)(th + 1);
13273 *cp = TCPOPT_TIMESTAMP;
13275 *cp = TCPOLEN_TIMESTAMP;
13277 val = htonl(ae->ts_value);
13278 bcopy((char *)&val,
13279 (char *)cp, sizeof(uint32_t));
13280 val = htonl(ae->ts_echo);
13281 bcopy((char *)&val,
13282 (char *)(cp + 4), sizeof(uint32_t));
13284 th->th_off = (sizeof(struct tcphdr) >> 2);
13287 * For sane logging we need to play a little trick.
13288 * If the ack were fully processed we would have moved
13289 * snd_una to high_seq, but since compressed acks are
13290 * processed in two phases, at this point (logging) snd_una
13291 * won't be advanced. So we would see multiple acks showing
13292 * the advancement. We can prevent that by "pretending" that
13293 * snd_una was advanced and then un-advancing it so that the
13294 * logging code has the right value for tlb_snd_una.
13296 if (tp->snd_una != high_seq) {
13297 orig_snd_una = tp->snd_una;
13298 tp->snd_una = high_seq;
13302 TCP_LOG_EVENTP(tp, th,
13303 &tp->t_inpcb->inp_socket->so_rcv,
13304 &tp->t_inpcb->inp_socket->so_snd, TCP_LOG_IN, 0,
13305 0, &log, true, &ltv);
13307 tp->snd_una = orig_snd_una;
13314 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts)
13318 * A persist or keep-alive was forced out, update our
13319 * min rtt time. Note we now worry about lost responses.
13320 * When a subsequent keep-alive or persist times out
13321 * and forced_ack is still on, then the last probe
13322 * was not responded to. In such cases we have a
13323 * sysctl that controls the behavior. Either we apply
13324 * the rtt but with reduced confidence (0), or we just
13325 * plain don't apply the rtt estimate. Having data flow
13326 * will clear the probe_not_answered flag i.e. cum-ack
13327 * move forward <or> exiting and reentering persists.
13330 rack->forced_ack = 0;
13331 rack->rc_tp->t_rxtshift = 0;
13332 if ((rack->rc_in_persist &&
13333 (tiwin == rack->rc_tp->snd_wnd)) ||
13334 (rack->rc_in_persist == 0)) {
13336 * In persists only apply the RTT update if this is
13337 * a response to our window probe. And that
13338 * means the rwnd sent must match the current
13339 * snd_wnd. If it does not, then we got a
13340 * window update ack instead. For keepalive
13341 * we allow the answer no matter what the window.
13343 * Note that if the probe_not_answered is set then
13344 * the forced_ack_ts is the oldest one i.e. the first
13345 * probe sent that might have been lost. This assures
13346 * us that if we do calculate an RTT it errs on the long
13347 * side rather than being too short.
13349 if (rack->rc_in_persist)
13350 counter_u64_add(rack_persists_acks, 1);
13351 us_rtt = us_cts - rack->r_ctl.forced_ack_ts;
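/*
 * e.g. if the (first) probe went out at forced_ack_ts == 1000000 and
 * this ack arrives at us_cts == 1040000, us_rtt is 40000 usecs. It is
 * fed to the RTT machinery with full confidence (3) only when the probe
 * was answered on the first try; otherwise the sysctl below decides
 * whether it is used with zero confidence or not at all.
 */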
13354 if (rack->probe_not_answered == 0) {
13355 rack_apply_updated_usrtt(rack, us_rtt, us_cts);
13356 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1);
13358 /* We have a retransmitted probe here too */
13359 if (rack_apply_rtt_with_reduced_conf) {
13360 rack_apply_updated_usrtt(rack, us_rtt, us_cts);
13361 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1);
13368 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv)
13371 * Handle a "special" compressed ack mbuf. Each incoming
13372 * ack has only four possible dispositions:
13374 * A) It moves the cum-ack forward
13375 * B) It is behind the cum-ack.
13376 * C) It is a window-update ack.
13377 * D) It is a dup-ack.
13379 * Note that we can have between 1 and TCP_COMP_ACK_ENTRIES entries
13380 * in the incoming mbuf. We also need to still pay attention
13381 * to nxt_pkt since there may be another packet after this
13384 #ifdef TCP_ACCOUNTING
13389 struct timespec ts;
13390 struct tcp_rack *rack;
13391 struct tcp_ackent *ae;
13392 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack;
13393 int cnt, i, did_out, ourfinisacked = 0;
13394 struct tcpopt to_holder, *to = NULL;
13395 #ifdef TCP_ACCOUNTING
13396 int win_up_req = 0;
13399 int under_pacing = 1;
13401 #ifdef TCP_ACCOUNTING
13404 rack = (struct tcp_rack *)tp->t_fb_ptr;
13405 if (rack->gp_ready &&
13406 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT))
13411 if (rack->r_state != tp->t_state)
13412 rack_set_state(tp, rack);
13413 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
13414 (tp->t_flags & TF_GPUTINPROG)) {
13416 * We have a goodput in progress
13417 * and we have entered a late state.
13418 * Do we have enough data in the sb
13419 * to handle the GPUT request?
13423 bytes = tp->gput_ack - tp->gput_seq;
13424 if (SEQ_GT(tp->gput_seq, tp->snd_una))
13425 bytes += tp->gput_seq - tp->snd_una;
13426 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) {
13428 * There are not enough bytes in the socket
13429 * buffer that have been sent to cover this
13430 * measurement. Cancel it.
13432 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
13433 rack->r_ctl.rc_gp_srtt /*flex1*/,
13435 0, 0, 18, __LINE__, NULL, 0);
13436 tp->t_flags &= ~TF_GPUTINPROG;
13441 KASSERT((m->m_len >= sizeof(struct tcp_ackent)),
13442 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len));
13443 cnt = m->m_len / sizeof(struct tcp_ackent);
13444 counter_u64_add(rack_multi_single_eq, cnt);
13445 high_seq = tp->snd_una;
13446 the_win = tp->snd_wnd;
13447 win_seq = tp->snd_wl1;
13448 win_upd_ack = tp->snd_wl2;
13449 cts = tcp_tv_to_usectick(tv);
13450 ms_cts = tcp_tv_to_mssectick(tv);
13451 rack->r_ctl.rc_rcvtime = cts;
13452 segsiz = ctf_fixed_maxseg(tp);
13453 if ((rack->rc_gp_dyn_mul) &&
13454 (rack->use_fixed_rate == 0) &&
13455 (rack->rc_always_pace)) {
13456 /* Check in on probertt */
13457 rack_check_probe_rtt(rack, cts);
13459 for (i = 0; i < cnt; i++) {
13460 #ifdef TCP_ACCOUNTING
13461 ts_val = get_cyclecount();
13463 rack_clear_rate_sample(rack);
13464 ae = ((mtod(m, struct tcp_ackent *)) + i);
13465 /* Setup the window */
13466 tiwin = ae->win << tp->snd_scale;
13467 if (tiwin > rack->r_ctl.rc_high_rwnd)
13468 rack->r_ctl.rc_high_rwnd = tiwin;
13469 /* figure out the type of ack */
13470 if (SEQ_LT(ae->ack, high_seq)) {
13472 ae->ack_val_set = ACK_BEHIND;
13473 } else if (SEQ_GT(ae->ack, high_seq)) {
13475 ae->ack_val_set = ACK_CUMACK;
13476 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){
13478 ae->ack_val_set = ACK_DUPACK;
13481 ae->ack_val_set = ACK_RWND;
13483 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq);
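/*
 * For example, with high_seq == 1000 and an unchanged window:
 * ae->ack == 900 is ACK_BEHIND, ae->ack == 1500 is ACK_CUMACK and
 * ae->ack == 1000 is ACK_DUPACK; the same ack with a changed window
 * (or while we are in persists) is classified as ACK_RWND instead.
 */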
13484 /* Validate timestamp */
13485 if (ae->flags & HAS_TSTMP) {
13486 /* Setup for a timestamp */
13487 to->to_flags = TOF_TS;
13488 ae->ts_echo -= tp->ts_offset;
13489 to->to_tsecr = ae->ts_echo;
13490 to->to_tsval = ae->ts_value;
13492 * If echoed timestamp is later than the current time, fall back to
13493 * non RFC1323 RTT calculation. Normalize timestamp if syncookies
13494 * were used when this connection was established.
13496 if (TSTMP_GT(ae->ts_echo, ms_cts))
13498 if (tp->ts_recent &&
13499 TSTMP_LT(ae->ts_value, tp->ts_recent)) {
13500 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) {
13501 #ifdef TCP_ACCOUNTING
13502 rdstc = get_cyclecount();
13503 if (rdstc > ts_val) {
13504 counter_u64_add(tcp_proc_time[ae->ack_val_set] ,
13506 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13507 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
13514 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) &&
13515 SEQ_LEQ(tp->last_ack_sent, ae->seq)) {
13516 tp->ts_recent_age = tcp_ts_getticks();
13517 tp->ts_recent = ae->ts_value;
13520 /* Setup for a no options */
13523 /* Update the rcv time and perform idle reduction possibly */
13524 if (tp->t_idle_reduce &&
13525 (tp->snd_max == tp->snd_una) &&
13526 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
13527 counter_u64_add(rack_input_idle_reduces, 1);
13528 rack_cc_after_idle(rack, tp);
13530 tp->t_rcvtime = ticks;
13531 /* Now what about ECN? */
13532 if (tcp_ecn_input_segment(tp, ae->flags, ae->codepoint))
13533 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__);
13534 #ifdef TCP_ACCOUNTING
13535 /* Count for the specific type of ack in */
13536 counter_u64_add(tcp_cnt_counters[ae->ack_val_set], 1);
13537 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13538 tp->tcp_cnt_counters[ae->ack_val_set]++;
13542 * Note how we could move these up in the determination
13543 * above, but we don't, so that the timestamp checks (and ECN)
13544 * are done first before we do any processing on the ACK.
13545 * The non-compressed path through the code has this
13546 * weakness (noted by @jtl) that it actually does some
13547 * processing before verifying the timestamp information.
13548 * We don't take that path here which is why we set
13549 * the ack_val_set first, do the timestamp and ecn
13550 * processing, and then look at what we have set up.
13552 if (ae->ack_val_set == ACK_BEHIND) {
13554 * Case B: flag reordering if the window is not closed;
13555 * otherwise it could be a keep-alive or persists probe
13557 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) {
13558 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
13560 } else if (ae->ack_val_set == ACK_DUPACK) {
13562 rack_strike_dupack(rack);
13563 } else if (ae->ack_val_set == ACK_RWND) {
13565 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
13566 ts.tv_sec = ae->timestamp / 1000000000;
13567 ts.tv_nsec = ae->timestamp % 1000000000;
13568 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
13569 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
13571 rack->r_ctl.act_rcv_time = *tv;
13573 if (rack->forced_ack) {
13574 rack_handle_probe_response(rack, tiwin,
13575 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
13577 #ifdef TCP_ACCOUNTING
13580 win_upd_ack = ae->ack;
13583 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq);
13586 if (SEQ_GT(ae->ack, tp->snd_max)) {
13588 * We just send an ack since the incoming
13589 * ack is beyond the largest seq we sent.
13591 if ((tp->t_flags & TF_ACKNOW) == 0) {
13592 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt);
13593 if (tp->t_flags & TF_ACKNOW)
13594 rack->r_wanted_output = 1;
13598 /* If the window changed setup to update */
13599 if (tiwin != tp->snd_wnd) {
13600 win_upd_ack = ae->ack;
13603 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq);
13605 #ifdef TCP_ACCOUNTING
13606 /* Account for the acks */
13607 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13608 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz);
13610 counter_u64_add(tcp_cnt_counters[CNT_OF_ACKS_IN],
13611 (((ae->ack - high_seq) + segsiz - 1) / segsiz));
13613 high_seq = ae->ack;
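/*
 * The accounting above credits one ack per segsiz worth of newly
 * acked data, rounded up: e.g. with segsiz == 1448 an ack advancing
 * 2900 bytes counts as (2900 + 1447) / 1448 == 3 acks.
 */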
13614 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
13615 union tcp_log_stackspecific log;
13618 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
13619 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
13620 log.u_bbr.flex1 = high_seq;
13621 log.u_bbr.flex2 = rack->r_ctl.roundends;
13622 log.u_bbr.flex3 = rack->r_ctl.current_round;
13623 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround;
13624 log.u_bbr.flex8 = 8;
13625 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
13626 0, &log, false, NULL, NULL, 0, &tv);
13629 * The draft (v3) calls for us to use SEQ_GEQ, but that
13630 * causes issues when we are just going app limited. Let's
13631 * instead use SEQ_GT <or> where it's equal but more data
13634 if ((SEQ_GT(high_seq, rack->r_ctl.roundends)) ||
13635 ((high_seq == rack->r_ctl.roundends) &&
13636 SEQ_GT(tp->snd_max, tp->snd_una))) {
13637 rack->r_ctl.current_round++;
13638 rack->r_ctl.roundends = tp->snd_max;
13639 if (CC_ALGO(tp)->newround != NULL) {
13640 CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round);
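/*
 * e.g. with roundends == 5000, a cum-ack to 5001 (or to exactly 5000
 * while more data is still outstanding) starts a new round and moves
 * roundends up to the current snd_max.
 */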
13643 /* Setup our act_rcv_time */
13644 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
13645 ts.tv_sec = ae->timestamp / 1000000000;
13646 ts.tv_nsec = ae->timestamp % 1000000000;
13647 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
13648 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
13650 rack->r_ctl.act_rcv_time = *tv;
13652 rack_process_to_cumack(tp, rack, ae->ack, cts, to);
13653 if (rack->rc_dsack_round_seen) {
13654 /* Is the dsack round over? */
13655 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) {
13657 rack->rc_dsack_round_seen = 0;
13658 rack_log_dsack_event(rack, 3, __LINE__, 0, 0);
13663 /* And lets be sure to commit the rtt measurements for this ack */
13664 tcp_rack_xmit_timer_commit(rack, tp);
13665 #ifdef TCP_ACCOUNTING
13666 rdstc = get_cyclecount();
13667 if (rdstc > ts_val) {
13668 counter_u64_add(tcp_proc_time[ae->ack_val_set] , (rdstc - ts_val));
13669 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13670 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
13671 if (ae->ack_val_set == ACK_CUMACK)
13672 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val);
13677 #ifdef TCP_ACCOUNTING
13678 ts_val = get_cyclecount();
13680 /* Tend to any collapsed window */
13681 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) {
13682 /* The peer collapsed the window */
13683 rack_collapsed_window(rack, (tp->snd_max - high_seq), __LINE__);
13684 } else if (rack->rc_has_collapsed)
13685 rack_un_collapse_window(rack, __LINE__);
13686 if ((rack->r_collapse_point_valid) &&
13687 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point)))
13688 rack->r_collapse_point_valid = 0;
13689 acked_amount = acked = (high_seq - tp->snd_una);
13692 * Clear the probe not answered flag
13693 * since cum-ack moved forward.
13695 rack->probe_not_answered = 0;
13696 if (rack->sack_attack_disable == 0)
13697 rack_do_decay(rack);
13698 if (acked >= segsiz) {
13700 * You only get credit for
13701 * an MSS or greater (and you get extra
13702 * credit for larger cum-ack moves).
13706 ac = acked / segsiz;
13707 rack->r_ctl.ack_count += ac;
13708 counter_u64_add(rack_ack_total, ac);
13710 if (rack->r_ctl.ack_count > 0xfff00000) {
13712 * reduce the number to keep us under
13715 rack->r_ctl.ack_count /= 2;
13716 rack->r_ctl.sack_count /= 2;
13718 if (tp->t_flags & TF_NEEDSYN) {
13720 * T/TCP: Connection was half-synchronized, and our SYN has
13721 * been ACK'd (so connection is now fully synchronized). Go
13722 * to non-starred state, increment snd_una for ACK of SYN,
13723 * and check if we can do window scaling.
13725 tp->t_flags &= ~TF_NEEDSYN;
13727 acked_amount = acked = (high_seq - tp->snd_una);
13729 if (acked > sbavail(&so->so_snd))
13730 acked_amount = sbavail(&so->so_snd);
13731 #ifdef NETFLIX_EXP_DETECTION
13733 * We only care about a cum-ack move if we are in a sack-disabled
13734 * state. We have already added in to the ack_count, and we never
13735 * would disable on a cum-ack move, so we only care to do the
13736 * detection if it may "undo" it, i.e. we were in disabled already.
13738 if (rack->sack_attack_disable)
13739 rack_do_detection(tp, rack, acked_amount, segsiz);
13741 if (IN_FASTRECOVERY(tp->t_flags) &&
13742 (rack->rack_no_prr == 0))
13743 rack_update_prr(tp, rack, acked_amount, high_seq);
13744 if (IN_RECOVERY(tp->t_flags)) {
13745 if (SEQ_LT(high_seq, tp->snd_recover) &&
13746 (SEQ_LT(high_seq, tp->snd_max))) {
13747 tcp_rack_partialack(tp);
13749 rack_post_recovery(tp, high_seq);
13753 /* Handle the rack-log-ack part (sendmap) */
13754 if ((sbused(&so->so_snd) == 0) &&
13755 (acked > acked_amount) &&
13756 (tp->t_state >= TCPS_FIN_WAIT_1) &&
13757 (tp->t_flags & TF_SENTFIN)) {
13759 * We must be sure our fin
13760 * was sent and acked (we can be
13761 * in FIN_WAIT_1 without having
13766 * Let's make sure snd_una is updated
13767 * since most likely acked_amount = 0 (it
13770 tp->snd_una = high_seq;
13772 /* Did we make a RTO error? */
13773 if ((tp->t_flags & TF_PREVVALID) &&
13774 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
13775 tp->t_flags &= ~TF_PREVVALID;
13776 if (tp->t_rxtshift == 1 &&
13777 (int)(ticks - tp->t_badrxtwin) < 0)
13778 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__);
13780 /* Handle the data in the socket buffer */
13781 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1);
13782 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
13783 if (acked_amount > 0) {
13784 struct mbuf *mfree;
13786 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery);
13787 SOCKBUF_LOCK(&so->so_snd);
13788 mfree = sbcut_locked(&so->so_snd, acked_amount);
13789 tp->snd_una = high_seq;
13790 /* Note we want to hold the sb lock through the sendmap adjust */
13791 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una);
13792 /* Wake up the socket if we have room to write more */
13793 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
13794 sowwakeup_locked(so);
13797 /* update progress */
13798 tp->t_acktime = ticks;
13799 rack_log_progress_event(rack, tp, tp->t_acktime,
13800 PROGRESS_UPDATE, __LINE__);
13801 /* Clear out shifts and such */
13802 tp->t_rxtshift = 0;
13803 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
13804 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
13805 rack->rc_tlp_in_progress = 0;
13806 rack->r_ctl.rc_tlp_cnt_out = 0;
13807 /* Send recover and snd_nxt must be dragged along */
13808 if (SEQ_GT(tp->snd_una, tp->snd_recover))
13809 tp->snd_recover = tp->snd_una;
13810 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
13811 tp->snd_nxt = tp->snd_una;
13813 * If the RXT timer is running we want to
13814 * stop it, so we can restart a TLP (or new RXT).
13816 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
13817 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
13818 #ifdef NETFLIX_HTTP_LOGGING
13819 tcp_http_check_for_comp(rack->rc_tp, high_seq);
13821 tp->snd_wl2 = high_seq;
13823 if (under_pacing &&
13824 (rack->use_fixed_rate == 0) &&
13825 (rack->in_probe_rtt == 0) &&
13826 rack->rc_gp_dyn_mul &&
13827 rack->rc_always_pace) {
13828 /* Check if we are dragging bottom */
13829 rack_check_bottom_drag(tp, rack, so, acked);
13831 if (tp->snd_una == tp->snd_max) {
13832 tp->t_flags &= ~TF_PREVVALID;
13833 rack->r_ctl.retran_during_recovery = 0;
13834 rack->r_ctl.dsack_byte_cnt = 0;
13835 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
13836 if (rack->r_ctl.rc_went_idle_time == 0)
13837 rack->r_ctl.rc_went_idle_time = 1;
13838 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
13839 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
13841 /* Set so we might enter persists... */
13842 rack->r_wanted_output = 1;
13843 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
13844 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
13845 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
13846 (sbavail(&so->so_snd) == 0) &&
13847 (tp->t_flags2 & TF2_DROP_AF_DATA)) {
13849 * The socket was gone and the
13850 * peer sent data (not now, but in the past); time to
13853 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
13854 /* tcp_close will kill the inp pre-log the Reset */
13855 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
13856 #ifdef TCP_ACCOUNTING
13857 rdstc = get_cyclecount();
13858 if (rdstc > ts_val) {
13859 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val));
13860 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13861 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13862 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13867 tp = tcp_close(tp);
13869 #ifdef TCP_ACCOUNTING
13875 * We would normally do drop-with-reset which would
13876 * send back a reset. We can't since we don't have
13877 * all the needed bits. Instead let's arrange for
13878 * a call to tcp_output(). That way since we
13879 * are in the closed state we will generate a reset.
13881 * Note if tcp_accounting is on we don't unpin since
13882 * we do that after the goto label.
13884 goto send_out_a_rst;
13886 if ((sbused(&so->so_snd) == 0) &&
13887 (tp->t_state >= TCPS_FIN_WAIT_1) &&
13888 (tp->t_flags & TF_SENTFIN)) {
13890 * If we can't receive any more data, then closing user can
13891 * proceed. Starting the timer is contrary to the
13892 * specification, but if we don't get a FIN we'll hang
13896 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
13897 soisdisconnected(so);
13898 tcp_timer_activate(tp, TT_2MSL,
13899 (tcp_fast_finwait2_recycle ?
13900 tcp_finwait2_timeout :
13903 if (ourfinisacked == 0) {
13905 * We don't change to fin-wait-2 if we have our fin acked
13906 * which means we are probably in TCPS_CLOSING.
13908 tcp_state_change(tp, TCPS_FIN_WAIT_2);
13912 /* Wake up the socket if we have room to write more */
13913 if (sbavail(&so->so_snd)) {
13914 rack->r_wanted_output = 1;
13915 if (ctf_progress_timeout_check(tp, true)) {
13916 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
13917 tp, tick, PROGRESS_DROP, __LINE__);
13919 * We cheat here and don't send a RST; we should send one
13920 * when the pacer drops the connection.
13922 #ifdef TCP_ACCOUNTING
13923 rdstc = get_cyclecount();
13924 if (rdstc > ts_val) {
13925 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val));
13926 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13927 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13928 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13933 (void)tcp_drop(tp, ETIMEDOUT);
13938 if (ourfinisacked) {
13939 switch(tp->t_state) {
13941 #ifdef TCP_ACCOUNTING
13942 rdstc = get_cyclecount();
13943 if (rdstc > ts_val) {
13944 counter_u64_add(tcp_proc_time[ACK_CUMACK] ,
13946 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13947 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13948 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13957 case TCPS_LAST_ACK:
13958 #ifdef TCP_ACCOUNTING
13959 rdstc = get_cyclecount();
13960 if (rdstc > ts_val) {
13961 counter_u64_add(tcp_proc_time[ACK_CUMACK] ,
13963 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13964 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13965 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13970 tp = tcp_close(tp);
13971 ctf_do_drop(m, tp);
13974 case TCPS_FIN_WAIT_1:
13975 #ifdef TCP_ACCOUNTING
13976 rdstc = get_cyclecount();
13977 if (rdstc > ts_val) {
13978 counter_u64_add(tcp_proc_time[ACK_CUMACK] ,
13980 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13981 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13982 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13986 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
13987 soisdisconnected(so);
13988 tcp_timer_activate(tp, TT_2MSL,
13989 (tcp_fast_finwait2_recycle ?
13990 tcp_finwait2_timeout :
13993 tcp_state_change(tp, TCPS_FIN_WAIT_2);
13999 if (rack->r_fast_output) {
14001 * We are doing fast output... can we expand that?
14003 rack_gain_for_fastoutput(rack, tp, so, acked_amount);
14005 #ifdef TCP_ACCOUNTING
14006 rdstc = get_cyclecount();
14007 if (rdstc > ts_val) {
14008 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val));
14009 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
14010 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
14011 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
14015 } else if (win_up_req) {
14016 rdstc = get_cyclecount();
14017 if (rdstc > ts_val) {
14018 counter_u64_add(tcp_proc_time[ACK_RWND] , (rdstc - ts_val));
14019 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
14020 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val);
14025 /* Now is there a next packet, if so we are done */
14029 #ifdef TCP_ACCOUNTING
14032 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs);
14035 rack_handle_might_revert(tp, rack);
14036 ctf_calc_rwin(so, tp);
14037 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) {
14039 if (tcp_output(tp) < 0) {
14040 #ifdef TCP_ACCOUNTING
14047 rack_free_trim(rack);
14048 #ifdef TCP_ACCOUNTING
14051 rack_timer_audit(tp, rack, &so->so_snd);
14052 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs);
14058 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
14059 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos,
14060 int32_t nxt_pkt, struct timeval *tv)
14062 #ifdef TCP_ACCOUNTING
14065 int32_t thflags, retval, did_out = 0;
14066 int32_t way_out = 0;
14068 * cts - is the current time from tv (caller gets ts) in microseconds.
14069 * ms_cts - is the current time from tv in milliseconds.
14070 * us_cts - is the time that LRO or hardware actually got the packet in microseconds.
14072 uint32_t cts, us_cts, ms_cts;
14073 uint32_t tiwin, high_seq;
14074 struct timespec ts;
14076 struct tcp_rack *rack;
14077 struct rack_sendmap *rsm;
14078 int32_t prev_state = 0;
14079 #ifdef TCP_ACCOUNTING
14080 int ack_val_set = 0xf;
14084 * tv passed from common code is from either M_TSTMP_LRO or
14085 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present.
14087 rack = (struct tcp_rack *)tp->t_fb_ptr;
14088 if (m->m_flags & M_ACKCMP) {
14090 * All compressed acks are acks by definition so
14091 * remove any ack required flag and then do the processing.
14093 rack->rc_ack_required = 0;
14094 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv));
14096 if (m->m_flags & M_ACKCMP) {
14097 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp);
14099 cts = tcp_tv_to_usectick(tv);
14100 ms_cts = tcp_tv_to_mssectick(tv);
14101 nsegs = m->m_pkthdr.lro_nsegs;
14102 counter_u64_add(rack_proc_non_comp_ack, 1);
14103 thflags = tcp_get_flags(th);
14104 #ifdef TCP_ACCOUNTING
14106 if (thflags & TH_ACK)
14107 ts_val = get_cyclecount();
14109 if ((m->m_flags & M_TSTMP) ||
14110 (m->m_flags & M_TSTMP_LRO)) {
14111 mbuf_tstmp2timespec(m, &ts);
14112 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
14113 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
14115 rack->r_ctl.act_rcv_time = *tv;
14116 kern_prefetch(rack, &prev_state);
14119 * Unscale the window into a 32-bit value. For the SYN_SENT state
14120 * the scale is zero.
14122 tiwin = th->th_win << tp->snd_scale;
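/*
 * e.g. a raw th_win of 1024 with snd_scale == 7 yields an effective
 * window of 1024 << 7 == 131072 bytes.
 */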
14123 #ifdef TCP_ACCOUNTING
14124 if (thflags & TH_ACK) {
14126 * We have a tradeoff here. We can either do what we are
14127 * doing i.e. pinning to this CPU and then doing the accounting
14128 * <or> we could do a critical enter, setup the rdtsc and cpu
14129 * as in below, and then validate we are on the same CPU on
14130 * exit. I have chosen not to do the critical enter since
14131 * that often will gain you a context switch, and instead lock
14132 * us (line above this if) to the same CPU with sched_pin(). This
14133 * means we may be context switched out for a higher priority
14134 * interrupt but we won't be moved to another CPU.
14136 * If this occurs (which it won't very often since we most likely
14137 * are running this code in interrupt context and only a higher
14138 * priority will bump us ... clock?) we will falsely add the
14139 * interrupt processing time on top of the ack processing
14140 * time. This is ok since it's a rare event.
14142 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin,
14143 ctf_fixed_maxseg(tp));
14147 * Parse options on any incoming segment.
14149 memset(&to, 0, sizeof(to));
14150 tcp_dooptions(&to, (u_char *)(th + 1),
14151 (th->th_off << 2) - sizeof(struct tcphdr),
14152 (thflags & TH_SYN) ? TO_SYN : 0);
14153 NET_EPOCH_ASSERT();
14154 INP_WLOCK_ASSERT(tp->t_inpcb);
14155 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
14158 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
14159 (tp->t_flags & TF_GPUTINPROG)) {
14161 * We have a goodput in progress
14162 * and we have entered a late state.
14163 * Do we have enough data in the sb
14164 * to handle the GPUT request?
14168 bytes = tp->gput_ack - tp->gput_seq;
14169 if (SEQ_GT(tp->gput_seq, tp->snd_una))
14170 bytes += tp->gput_seq - tp->snd_una;
14171 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) {
14173 * There are not enough bytes in the socket
14174 * buffer that have been sent to cover this
14175 * measurement. Cancel it.
14177 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
14178 rack->r_ctl.rc_gp_srtt /*flex1*/,
14180 0, 0, 18, __LINE__, NULL, 0);
14181 tp->t_flags &= ~TF_GPUTINPROG;
14184 high_seq = th->th_ack;
14185 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
14186 union tcp_log_stackspecific log;
14187 struct timeval ltv;
14188 #ifdef NETFLIX_HTTP_LOGGING
14189 struct http_sendfile_track *http_req;
14191 if (SEQ_GT(th->th_ack, tp->snd_una)) {
14192 http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1));
14194 http_req = tcp_http_find_req_for_seq(tp, th->th_ack);
14197 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
14198 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
14199 if (rack->rack_no_prr == 0)
14200 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
14202 log.u_bbr.flex1 = 0;
14203 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
14204 log.u_bbr.use_lt_bw <<= 1;
14205 log.u_bbr.use_lt_bw |= rack->r_might_revert;
14206 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
14207 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
14208 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
14209 log.u_bbr.flex3 = m->m_flags;
14210 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
14211 log.u_bbr.lost = thflags;
14212 log.u_bbr.pacing_gain = 0x1;
14213 #ifdef TCP_ACCOUNTING
14214 log.u_bbr.cwnd_gain = ack_val_set;
14216 log.u_bbr.flex7 = 2;
14217 if (m->m_flags & M_TSTMP) {
14218 /* Record the hardware timestamp if present */
14219 mbuf_tstmp2timespec(m, &ts);
14220 ltv.tv_sec = ts.tv_sec;
14221 ltv.tv_usec = ts.tv_nsec / 1000;
14222 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
14223 } else if (m->m_flags & M_TSTMP_LRO) {
14224 /* Record the LRO arrival timestamp */
14225 mbuf_tstmp2timespec(m, &ts);
14226 ltv.tv_sec = ts.tv_sec;
14227 ltv.tv_usec = ts.tv_nsec / 1000;
14228 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
14230 log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
14231 /* Log the rcv time */
14232 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
14233 #ifdef NETFLIX_HTTP_LOGGING
14234 log.u_bbr.applimited = tp->t_http_closed;
14235 log.u_bbr.applimited <<= 8;
14236 log.u_bbr.applimited |= tp->t_http_open;
14237 log.u_bbr.applimited <<= 8;
14238 log.u_bbr.applimited |= tp->t_http_req;
14240 /* Copy out any client req info */
14242 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
14244 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
14245 log.u_bbr.rttProp = http_req->timestamp;
14246 log.u_bbr.cur_del_rate = http_req->start;
14247 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
14248 log.u_bbr.flex8 |= 1;
14250 log.u_bbr.flex8 |= 2;
14251 log.u_bbr.bw_inuse = http_req->end;
14253 log.u_bbr.flex6 = http_req->start_seq;
14254 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
14255 log.u_bbr.flex8 |= 4;
14256 log.u_bbr.epoch = http_req->end_seq;
14260 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
14261 tlen, &log, true, &ltv);
14263 /* Remove ack required flag if set, we have one */
14264 if (thflags & TH_ACK)
14265 rack->rc_ack_required = 0;
14266 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
14270 goto done_with_input;
14273 * If a segment with the ACK-bit set arrives in the SYN-SENT state
14274 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
14276 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
14277 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
14278 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
14279 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
14280 #ifdef TCP_ACCOUNTING
14286 * If timestamps were negotiated during SYN/ACK and a
14287 * segment without a timestamp is received, silently drop
14288 * the segment, unless it is a RST segment or missing timestamps are
14290 * See section 3.2 of RFC 7323.
14292 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) &&
14293 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) {
14297 goto done_with_input;
14301 * Segment received on connection. Reset idle time and keep-alive
14302 * timer. XXX: This should be done after segment validation to
14303 * ignore broken/spoofed segs.
14305 if (tp->t_idle_reduce &&
14306 (tp->snd_max == tp->snd_una) &&
14307 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
14308 counter_u64_add(rack_input_idle_reduces, 1);
14309 rack_cc_after_idle(rack, tp);
14311 tp->t_rcvtime = ticks;
14313 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
14315 if (tiwin > rack->r_ctl.rc_high_rwnd)
14316 rack->r_ctl.rc_high_rwnd = tiwin;
14318 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move
14319 * this to occur after we've validated the segment.
14321 if (tcp_ecn_input_segment(tp, thflags, iptos))
14322 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__);
14325 * If echoed timestamp is later than the current time, fall back to
14326 * non RFC1323 RTT calculation. Normalize timestamp if syncookies
14327 * were used when this connection was established.
14329 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
14330 to.to_tsecr -= tp->ts_offset;
14331 if (TSTMP_GT(to.to_tsecr, ms_cts))
14336 * If its the first time in we need to take care of options and
14337 * verify we can do SACK for rack!
14339 if (rack->r_state == 0) {
14340 /* Should be init'd by rack_init() */
14341 KASSERT(rack->rc_inp != NULL,
14342 ("%s: rack->rc_inp unexpectedly NULL", __func__));
14343 if (rack->rc_inp == NULL) {
14344 rack->rc_inp = tp->t_inpcb;
14348 * Process options only when we get SYN/ACK back. The SYN
14349 * case for incoming connections is handled in tcp_syncache.
14350 * According to RFC1323 the window field in a SYN (i.e., a
14351 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX
14352 * this is traditional behavior, may need to be cleaned up.
14354 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
14355 /* Handle parallel SYN for ECN */
14356 tcp_ecn_input_parallel_syn(tp, thflags, iptos);
14357 if ((to.to_flags & TOF_SCALE) &&
14358 (tp->t_flags & TF_REQ_SCALE)) {
14359 tp->t_flags |= TF_RCVD_SCALE;
14360 tp->snd_scale = to.to_wscale;
14362 tp->t_flags &= ~TF_REQ_SCALE;
14364 * Initial send window. It will be updated with the
14365 * next incoming segment to the scaled value.
14367 tp->snd_wnd = th->th_win;
14368 rack_validate_fo_sendwin_up(tp, rack);
14369 if ((to.to_flags & TOF_TS) &&
14370 (tp->t_flags & TF_REQ_TSTMP)) {
14371 tp->t_flags |= TF_RCVD_TSTMP;
14372 tp->ts_recent = to.to_tsval;
14373 tp->ts_recent_age = cts;
14375 tp->t_flags &= ~TF_REQ_TSTMP;
14376 if (to.to_flags & TOF_MSS) {
14377 tcp_mss(tp, to.to_mss);
14379 if ((tp->t_flags & TF_SACK_PERMIT) &&
14380 (to.to_flags & TOF_SACKPERM) == 0)
14381 tp->t_flags &= ~TF_SACK_PERMIT;
14382 if (IS_FASTOPEN(tp->t_flags)) {
14383 if (to.to_flags & TOF_FASTOPEN) {
14386 if (to.to_flags & TOF_MSS)
14389 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
14393 tcp_fastopen_update_cache(tp, mss,
14394 to.to_tfo_len, to.to_tfo_cookie);
14396 tcp_fastopen_disable_path(tp);
14400 * At this point we are at the initial call. Here we decide
14401 * if we are doing RACK or not. We do this by seeing if
14402 * TF_SACK_PERMIT is set and the sack-not-required is clear.
14403 * The code now does do dup-ack counting so if you don't
14404 * switch back you won't get rack & TLP, but you will still
14408 if ((rack_sack_not_required == 0) &&
14409 ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
14410 tcp_switch_back_to_default(tp);
14411 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen,
14413 #ifdef TCP_ACCOUNTING
14418 tcp_set_hpts(tp->t_inpcb);
14419 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack);
14421 if (thflags & TH_FIN)
14422 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN);
14423 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
14424 if ((rack->rc_gp_dyn_mul) &&
14425 (rack->use_fixed_rate == 0) &&
14426 (rack->rc_always_pace)) {
14427 /* Check in on probertt */
14428 rack_check_probe_rtt(rack, us_cts);
14430 rack_clear_rate_sample(rack);
14431 if ((rack->forced_ack) &&
14432 ((tcp_get_flags(th) & TH_RST) == 0)) {
14433 rack_handle_probe_response(rack, tiwin, us_cts);
14436 * This is the one exception case where we set the rack state
14437 * always. All other times (timers etc) we must have a rack-state
14438 * set (so we assure we have done the checks above for SACK).
14440 rack->r_ctl.rc_rcvtime = cts;
14441 if (rack->r_state != tp->t_state)
14442 rack_set_state(tp, rack);
14443 if (SEQ_GT(th->th_ack, tp->snd_una) &&
14444 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL)
14445 kern_prefetch(rsm, &prev_state);
14446 prev_state = rack->r_state;
14447 retval = (*rack->r_substate) (m, th, so,
14448 tp, &to, drop_hdrlen,
14449 tlen, tiwin, thflags, nxt_pkt, iptos);
14451 if ((retval == 0) &&
14452 (tp->t_inpcb == NULL)) {
14453 panic("retval:%d tp:%p t_inpcb:NULL state:%d",
14454 retval, tp, prev_state);
14459 * If retval is 1 the tcb is unlocked and most likely the tp
14462 INP_WLOCK_ASSERT(tp->t_inpcb);
14463 if ((rack->rc_gp_dyn_mul) &&
14464 (rack->rc_always_pace) &&
14465 (rack->use_fixed_rate == 0) &&
14466 rack->in_probe_rtt &&
14467 (rack->r_ctl.rc_time_probertt_starts == 0)) {
14469 * If we are going for target, let's recheck before
14472 rack_check_probe_rtt(rack, us_cts);
14474 if (rack->set_pacing_done_a_iw == 0) {
14475 /* How much has been acked? */
14476 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) {
14477 /* We have enough to set in the pacing segment size */
14478 rack->set_pacing_done_a_iw = 1;
14479 rack_set_pace_segments(tp, rack, __LINE__, NULL);
14482 tcp_rack_xmit_timer_commit(rack, tp);
14483 #ifdef TCP_ACCOUNTING
14485 * If we set the ack_val_set to what ack processing we are doing
14486 * we also want to track how many cycles we burned. Note
14487 * the bits after tcp_output we let be "free". This is because
14488 * we are also tracking the tcp_output times as well. Note the
14489 * use of 0xf here since we only have 11 counters (0 - 0xa) and
14490 * 0xf cannot be returned and is what we initialize it to, in order
14491 * to indicate we are not doing the tabulations.
14493 if (ack_val_set != 0xf) {
14496 crtsc = get_cyclecount();
14497 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val));
14498 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
14499 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val);
14503 if (nxt_pkt == 0) {
14504 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) {
14506 if (tcp_output(tp) < 0)
14510 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
14511 rack_free_trim(rack);
14513 /* Update any rounds needed */
14514 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
14515 union tcp_log_stackspecific log;
14518 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
14519 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
14520 log.u_bbr.flex1 = high_seq;
14521 log.u_bbr.flex2 = rack->r_ctl.roundends;
14522 log.u_bbr.flex3 = rack->r_ctl.current_round;
14523 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround;
14524 log.u_bbr.flex8 = 9;
14525 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
14526 0, &log, false, NULL, NULL, 0, &tv);
14529 * The draft (v3) calls for us to use SEQ_GEQ, but that
14530 * causes issues when we are just going app limited. Let's
14531 * instead use SEQ_GT <or> where it's equal but more data
14534 if ((SEQ_GT(tp->snd_una, rack->r_ctl.roundends)) ||
14535 ((tp->snd_una == rack->r_ctl.roundends) && SEQ_GT(tp->snd_max, tp->snd_una))) {
14536 rack->r_ctl.current_round++;
14537 rack->r_ctl.roundends = tp->snd_max;
14538 if (CC_ALGO(tp)->newround != NULL) {
14539 CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round);
14542 if ((nxt_pkt == 0) &&
14543 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
14544 (SEQ_GT(tp->snd_max, tp->snd_una) ||
14545 (tp->t_flags & TF_DELACK) ||
14546 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
14547 (tp->t_state <= TCPS_CLOSING)))) {
14548 /* We could not send (probably in the hpts but stopped the timer earlier)? */
14549 if ((tp->snd_max == tp->snd_una) &&
14550 ((tp->t_flags & TF_DELACK) == 0) &&
14551 (tcp_in_hpts(rack->rc_inp)) &&
14552 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
14553 /* keep alive not needed if we are awaiting hptsi output */
14557 if (tcp_in_hpts(rack->rc_inp)) {
14558 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
14559 us_cts = tcp_get_usecs(NULL);
14560 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
14562 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
14565 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
14567 tcp_hpts_remove(tp->t_inpcb);
14569 if (late && (did_out == 0)) {
14571 * We are late in the sending
14572 * and we did not call the output
14573 * (this probably should not happen).
14575 goto do_output_now;
14577 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
14580 } else if (nxt_pkt == 0) {
14581 /* Do we have the correct timer running? */
14582 rack_timer_audit(tp, rack, &so->so_snd);
14586 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs));
14588 rack->r_wanted_output = 0;
14590 if (tp->t_inpcb == NULL) {
14591 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d",
14593 retval, tp, prev_state);
14596 #ifdef TCP_ACCOUNTING
14599 * Track the time (see above).
14601 if (ack_val_set != 0xf) {
14604 crtsc = get_cyclecount();
14605 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val));
14607 * Note we *DO NOT* increment the per-tcb counters since
14608 * in the else the TP may be gone!!
14613 #ifdef TCP_ACCOUNTING
14620 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
14621 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos)
14625 /* First lets see if we have old packets */
14626 if (tp->t_in_pkt) {
14627 if (ctf_do_queued_segments(so, tp, 1)) {
14632 if (m->m_flags & M_TSTMP_LRO) {
14633 mbuf_tstmp2timeval(m, &tv);
14635 /* Should not happen; should we kassert instead? */
14636 tcp_get_usecs(&tv);
14638 if (rack_do_segment_nounlock(m, th, so, tp,
14639 drop_hdrlen, tlen, iptos, 0, &tv) == 0) {
14640 INP_WUNLOCK(tp->t_inpcb);
14644 struct rack_sendmap *
14645 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused)
14647 struct rack_sendmap *rsm = NULL;
14649 uint32_t srtt = 0, thresh = 0, ts_low = 0;
14651 /* Return the next guy to be re-transmitted */
14652 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) {
14655 if (tp->t_flags & TF_SENTFIN) {
14656 /* retran the end FIN? */
14659 /* ok let's look at this one */
14660 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
14661 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) {
14664 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) {
14667 rsm = rack_find_lowest_rsm(rack);
14672 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) &&
14673 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
14675 * No SACK, so we automatically do the 3 strikes and
14676 * retransmit (no rack timer would be started).
14681 if (rsm->r_flags & RACK_ACKED) {
14684 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) &&
14685 (rsm->r_dupack < DUP_ACK_THRESHOLD)) {
14686 /* It's not yet ready */
14689 srtt = rack_grab_rtt(tp, rack);
14690 idx = rsm->r_rtr_cnt - 1;
14691 ts_low = (uint32_t)rsm->r_tim_lastsent[idx];
14692 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
14693 if ((tsused == ts_low) ||
14694 (TSTMP_LT(tsused, ts_low))) {
14695 /* No time since sending */
14698 if ((tsused - ts_low) < thresh) {
14699 /* It has not been long enough yet */
14702 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
14703 ((rsm->r_flags & RACK_SACK_PASSED) &&
14704 (rack->sack_attack_disable == 0))) {
14706 * We have passed the dup-ack threshold <or>
14707 * a SACK has indicated this is missing.
14708 * Note that if you are a declared attacker
14709 * it is only the dup-ack threshold that
14710 * will cause retransmits.
14712 /* log retransmit reason */
14713 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1);
14714 rack->r_fast_output = 0;
14721 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
14722 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method,
14723 int line, struct rack_sendmap *rsm, uint8_t quality)
14725 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
14726 union tcp_log_stackspecific log;
14729 memset(&log, 0, sizeof(log));
14730 log.u_bbr.flex1 = slot;
14731 log.u_bbr.flex2 = len;
14732 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs;
14733 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs;
14734 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss;
14735 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca;
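/* Pack assorted boolean state flags into use_lt_bw, one bit each (gp_ready ends up in the least significant bit). */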
14736 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data;
14737 log.u_bbr.use_lt_bw <<= 1;
14738 log.u_bbr.use_lt_bw |= rack->r_late;
14739 log.u_bbr.use_lt_bw <<= 1;
14740 log.u_bbr.use_lt_bw |= rack->r_early;
14741 log.u_bbr.use_lt_bw <<= 1;
14742 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
14743 log.u_bbr.use_lt_bw <<= 1;
14744 log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
14745 log.u_bbr.use_lt_bw <<= 1;
14746 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
14747 log.u_bbr.use_lt_bw <<= 1;
14748 log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
14749 log.u_bbr.use_lt_bw <<= 1;
14750 log.u_bbr.use_lt_bw |= rack->gp_ready;
14751 log.u_bbr.pkt_epoch = line;
14752 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed;
14753 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early;
14754 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec;
14755 log.u_bbr.bw_inuse = bw_est;
14756 log.u_bbr.delRate = bw;
14757 if (rack->r_ctl.gp_bw == 0)
14758 log.u_bbr.cur_del_rate = 0;
14760 log.u_bbr.cur_del_rate = rack_get_bw(rack);
14761 log.u_bbr.rttProp = len_time;
14762 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt;
14763 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit;
14764 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
14765 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) {
14766 /* We are in slow start */
14767 log.u_bbr.flex7 = 1;
14769 /* we are in congestion avoidance */
14770 log.u_bbr.flex7 = 0;
14772 log.u_bbr.flex8 = method;
14773 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
14774 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
14775 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec;
14776 log.u_bbr.cwnd_gain <<= 1;
14777 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
14778 log.u_bbr.cwnd_gain <<= 1;
14779 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
14780 log.u_bbr.bbr_substate = quality;
14781 TCP_LOG_EVENTP(rack->rc_tp, NULL,
14782 &rack->rc_inp->inp_socket->so_rcv,
14783 &rack->rc_inp->inp_socket->so_snd,
14784 BBR_LOG_HPTSI_CALC, 0,
14785 0, &log, false, &tv);
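/*
 * Return the pacing burst size in bytes for this bandwidth and mss, capped
 * at the user-configured maximum number of segments.
 */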
14790 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss)
14792 uint32_t new_tso, user_max;
14794 user_max = rack->rc_user_set_max_segs * mss;
14795 if (rack->rc_force_max_seg) {
14798 if (rack->use_fixed_rate &&
14799 ((rack->r_ctl.crte == NULL) ||
14800 (bw != rack->r_ctl.crte->rate))) {
14801 /* Use the user mss since we are not exactly matched */
14804 new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL);
14805 if (new_tso > user_max)
14806 new_tso = user_max;
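/*
 * If filling the congestion window within one RTT calls for a faster rate
 * than the caller's, shorten the pacing slot accordingly; returns the
 * (possibly reduced) slot time in microseconds.
 */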
14811 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced)
14813 uint64_t lentim, fill_bw;
14815 /* Let's first see if we are full; if so, continue with the normal rate */
14816 rack->r_via_fill_cw = 0;
14817 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use)
14819 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd)
14821 if (rack->r_ctl.rc_last_us_rtt == 0)
14823 if (rack->rc_pace_fill_if_rttin_range &&
14824 (rack->r_ctl.rc_last_us_rtt >=
14825 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) {
14826 /* The RTT is huge (N * smallest); let's not fill */
14830 * First let's calculate the b/w based on the last us-rtt
14833 fill_bw = rack->r_ctl.cwnd_to_use;
14834 /* Take the rwnd if it's smaller */
14835 if (fill_bw > rack->rc_tp->snd_wnd)
14836 fill_bw = rack->rc_tp->snd_wnd;
14837 if (rack->r_fill_less_agg) {
14839 * Now take away the inflight (this will reduce our
14840 * aggressiveness; if we get that much out in one RTT
14841 * we will have had acks come back and still be behind).
14843 fill_bw -= ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
14845 /* Now let's make it into a b/w */
14846 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC;
14847 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt;
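/* fill_bw is now a rate in bytes per second: the window's worth of bytes divided by the last measured RTT. */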
14848 /* We are below the min b/w */
14850 *rate_wanted = fill_bw;
14851 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted))
14853 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap))
14854 fill_bw = rack->r_ctl.bw_rate_cap;
14855 rack->r_via_fill_cw = 1;
14856 if (rack->r_rack_hw_rate_caps &&
14857 (rack->r_ctl.crte != NULL)) {
14858 uint64_t high_rate;
14860 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte);
14861 if (fill_bw > high_rate) {
14862 /* We are capping bw at the highest rate table entry */
14863 if (*rate_wanted > high_rate) {
14864 /* The original rate was also capped */
14865 rack->r_via_fill_cw = 0;
14867 rack_log_hdwr_pacing(rack,
14868 fill_bw, high_rate, __LINE__,
14870 fill_bw = high_rate;
14874 } else if ((rack->r_ctl.crte == NULL) &&
14875 (rack->rack_hdrw_pacing == 0) &&
14876 (rack->rack_hdw_pace_ena) &&
14877 rack->r_rack_hw_rate_caps &&
14878 (rack->rack_attempt_hdwr_pace == 0) &&
14879 (rack->rc_inp->inp_route.ro_nh != NULL) &&
14880 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
14882 * Ok we may have a first attempt that is greater than our top rate
14885 uint64_t high_rate;
14887 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp);
14889 if (fill_bw > high_rate) {
14890 fill_bw = high_rate;
14897 * OK, fill_bw holds our mythical b/w to fill the cwnd
14898 * in an RTT; what does that equate to time-wise?
14900 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC;
14902 *rate_wanted = fill_bw;
14903 if (non_paced || (lentim < slot)) {
14904 rack_log_pacing_delay_calc(rack, len, slot, fill_bw,
14905 0, lentim, 12, __LINE__, NULL, 0);
14906 return ((int32_t)lentim);
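/*
 * Compute the pacing delay, in microseconds, for sending len bytes: either
 * the legacy cwnd/srtt burst-mitigation calculation (when pacing is off) or
 * one derived from the estimated/fixed bandwidth, optionally adjusted for
 * fill-the-cwnd mode and hardware pacing.
 */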
14912 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz)
14916 int can_start_hw_pacing = 1;
14919 if (rack->rc_always_pace == 0) {
14921 * We use the most optimistic possible cwnd/srtt for
14922 * sending calculations. This will make our
14923 * calculation anticipate getting more through
14924 * quicker than possible. But that's OK; we don't want
14925 * the peer to have a gap in data sending.
14927 uint64_t cwnd, tr_perms = 0;
14928 int32_t reduce = 0;
14932 * We keep no precise pacing with the old method;
14933 * instead we use the pacer to mitigate bursts.
14935 if (rack->r_ctl.rc_rack_min_rtt)
14936 srtt = rack->r_ctl.rc_rack_min_rtt;
14938 srtt = max(tp->t_srtt, 1);
14939 if (rack->r_ctl.rc_rack_largest_cwnd)
14940 cwnd = rack->r_ctl.rc_rack_largest_cwnd;
14942 cwnd = rack->r_ctl.cwnd_to_use;
14943 /* Scale cwnd by 1000 so dividing by srtt (in usecs) yields bytes per ms */
14944 tr_perms = (cwnd * 1000) / srtt;
14945 if (tr_perms == 0) {
14946 tr_perms = ctf_fixed_maxseg(tp);
14949 * Calculate how long this will take to drain. If
14950 * the calculation comes out to zero, that's OK; we
14951 * will use send_a_lot to possibly spin around for
14952 * more, increasing tot_len_this_send to the point
14953 * that it is going to require a pace, or we hit the
14954 * cwnd. In that case we are just waiting for
14957 slot = len / tr_perms;
14958 /* Now do we reduce the time so we don't run dry? */
14959 if (slot && rack_slot_reduction) {
14960 reduce = (slot / rack_slot_reduction);
14961 if (reduce < slot) {
14966 slot *= HPTS_USEC_IN_MSEC;
14967 if (rack->rc_pace_to_cwnd) {
14968 uint64_t rate_wanted = 0;
14970 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1);
14971 rack->rc_ack_can_sendout_data = 1;
14972 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0);
14974 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0);
14976 uint64_t bw_est, res, lentim, rate_wanted;
14977 uint32_t orig_val, segs, oh;
14981 if ((rack->r_rr_config == 1) && rsm) {
14982 return (rack->r_ctl.rc_min_to);
14984 if (rack->use_fixed_rate) {
14985 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack);
14986 } else if ((rack->r_ctl.init_rate == 0) &&
14987 #ifdef NETFLIX_PEAKRATE
14988 (rack->rc_tp->t_maxpeakrate == 0) &&
14990 (rack->r_ctl.gp_bw == 0)) {
14991 /* no way yet to do an estimate */
14992 bw_est = rate_wanted = 0;
14994 bw_est = rack_get_bw(rack);
14995 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped);
14997 if ((bw_est == 0) || (rate_wanted == 0) ||
14998 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) {
15000 * No way yet to make a b/w estimate, or
15001 * our rate is set incorrectly.
15005 /* We need to account for all the overheads */
15006 segs = (len + segsiz - 1) / segsiz;
15008 * We need the diff between 1514 bytes (e-mtu with e-hdr)
15009 * and how much data we put in each packet. Yes this
15010 * means we may be off if we are larger than 1500 bytes
15011 * or smaller. But this just makes us more conservative.
15013 if (rack_hw_rate_min &&
15014 (bw_est < rack_hw_rate_min))
15015 can_start_hw_pacing = 0;
15016 if (ETHERNET_SEGMENT_SIZE > segsiz)
15017 oh = ETHERNET_SEGMENT_SIZE - segsiz;
15021 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC;
15022 res = lentim / rate_wanted;
15023 slot = (uint32_t)res;
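/* slot is the resulting pacing delay in microseconds for this send at rate_wanted. */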
15024 orig_val = rack->r_ctl.rc_pace_max_segs;
15025 if (rack->r_ctl.crte == NULL) {
15027 * Only do this if we are not hardware pacing
15028 * since if we are doing hw-pacing below we will
15029 * make a call after setting up or changing
15032 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
15033 } else if (rack->rc_inp->inp_snd_tag == NULL) {
15035 * We lost our rate somehow, this can happen
15036 * if the interface changed underneath us.
15038 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
15039 rack->r_ctl.crte = NULL;
15040 /* Let's re-allow attempting to set up pacing */
15041 rack->rack_hdrw_pacing = 0;
15042 rack->rack_attempt_hdwr_pace = 0;
15043 rack_log_hdwr_pacing(rack,
15044 rate_wanted, bw_est, __LINE__,
15047 /* Did we change the TSO size? If so, log it */
15048 if (rack->r_ctl.rc_pace_max_segs != orig_val)
15049 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL, 0);
15050 prev_fill = rack->r_via_fill_cw;
15051 if ((rack->rc_pace_to_cwnd) &&
15053 (rack->use_fixed_rate == 0) &&
15054 (rack->in_probe_rtt == 0) &&
15055 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) {
15057 * We want to pace at our rate *or* faster to
15058 * fill the cwnd to the max if it's not full.
15060 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0);
15062 if ((rack->rc_inp->inp_route.ro_nh != NULL) &&
15063 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
15064 if ((rack->rack_hdw_pace_ena) &&
15065 (can_start_hw_pacing > 0) &&
15066 (rack->rack_hdrw_pacing == 0) &&
15067 (rack->rack_attempt_hdwr_pace == 0)) {
15069 * Let's attempt to turn on hardware pacing
15072 rack->rack_attempt_hdwr_pace = 1;
15073 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp,
15074 rack->rc_inp->inp_route.ro_nh->nh_ifp,
15077 &err, &rack->r_ctl.crte_prev_rate);
15078 if (rack->r_ctl.crte) {
15079 rack->rack_hdrw_pacing = 1;
15080 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, segsiz,
15081 0, rack->r_ctl.crte,
15083 rack_log_hdwr_pacing(rack,
15084 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
15086 rack->r_ctl.last_hw_bw_req = rate_wanted;
15088 counter_u64_add(rack_hw_pace_init_fail, 1);
15090 } else if (rack->rack_hdrw_pacing &&
15091 (rack->r_ctl.last_hw_bw_req != rate_wanted)) {
15092 /* Do we need to adjust our rate? */
15093 const struct tcp_hwrate_limit_table *nrte;
15095 if (rack->r_up_only &&
15096 (rate_wanted < rack->r_ctl.crte->rate)) {
15098 * We have four possible states here
15099 * having to do with the previous time
15101 * previous | this-time
15102 * A) 0 | 0 -- fill_cw not in the picture
15103 * B) 1 | 0 -- we were doing a fill-cw but now are not
15104 * C) 1 | 1 -- all rates from fill_cw
15105 * D) 0 | 1 -- we were doing non-fill and now we are filling
15107 * For cases A, C and D we don't allow a drop. But for
15108 * case B, where we are now on our steady rate, we do
15112 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0)))
15115 if ((rate_wanted > rack->r_ctl.crte->rate) ||
15116 (rate_wanted <= rack->r_ctl.crte_prev_rate)) {
15117 if (rack_hw_rate_to_low &&
15118 (bw_est < rack_hw_rate_to_low)) {
15120 * The pacing rate is too low for hardware, but
15121 * do allow hardware pacing to be restarted.
15123 rack_log_hdwr_pacing(rack,
15124 bw_est, rack->r_ctl.crte->rate, __LINE__,
15126 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
15127 rack->r_ctl.crte = NULL;
15128 rack->rack_attempt_hdwr_pace = 0;
15129 rack->rack_hdrw_pacing = 0;
15130 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
15133 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte,
15135 rack->rc_inp->inp_route.ro_nh->nh_ifp,
15138 &err, &rack->r_ctl.crte_prev_rate);
15139 if (nrte == NULL) {
15140 /* Lost the rate */
15141 rack->rack_hdrw_pacing = 0;
15142 rack->r_ctl.crte = NULL;
15143 rack_log_hdwr_pacing(rack,
15144 rate_wanted, 0, __LINE__,
15146 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
15147 counter_u64_add(rack_hw_pace_lost, 1);
15148 } else if (nrte != rack->r_ctl.crte) {
15149 rack->r_ctl.crte = nrte;
15150 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted,
15154 rack_log_hdwr_pacing(rack,
15155 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
15157 rack->r_ctl.last_hw_bw_req = rate_wanted;
15160 /* We just need to adjust the segment size */
15161 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
15162 rack_log_hdwr_pacing(rack,
15163 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
15165 rack->r_ctl.last_hw_bw_req = rate_wanted;
15169 if ((rack->r_ctl.crte != NULL) &&
15170 (rack->r_ctl.crte->rate == rate_wanted)) {
15172 * We need to add an extra delay if the rates
15173 * are exactly matched. The idea is
15174 * we want the software to make sure the
15175 * queue is empty before adding more; this
15176 * gives us N MSS extra pace times where
15179 slot += (rack->r_ctl.crte->time_between * rack_hw_pace_extra_slots);
15182 if (rack_limit_time_with_srtt &&
15183 (rack->use_fixed_rate == 0) &&
15184 #ifdef NETFLIX_PEAKRATE
15185 (rack->rc_tp->t_maxpeakrate == 0) &&
15187 (rack->rack_hdrw_pacing == 0)) {
15189 * Sanity check, we do not allow the pacing delay
15190 * to be longer than the SRTT of the path. If it is
15191 * a slow path, then adding a packet should increase
15192 * the RTT and compensate for this i.e. the srtt will
15193 * be greater so the allowed pacing time will be greater.
15195 * Note this restriction does not apply where a peak rate
15196 * is set, or where we are doing fixed pacing or hardware pacing.
15198 if (rack->rc_tp->t_srtt)
15199 srtt = rack->rc_tp->t_srtt;
15201 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* it's in ms; convert to usecs */
15202 if (srtt < (uint64_t)slot) {
15203 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0);
15207 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0);
15209 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) {
15211 * If this rate is seeing ENOBUFS when it
15212 * goes to send then either the NIC is out
15213 * of gas or we are mis-estimating the time
15214 * somehow and not letting the queue empty
15215 * completely. Let's add to the pacing time.
15217 int hw_boost_delay;
15219 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult;
15220 if (hw_boost_delay > rack_enobuf_hw_max)
15221 hw_boost_delay = rack_enobuf_hw_max;
15222 else if (hw_boost_delay < rack_enobuf_hw_min)
15223 hw_boost_delay = rack_enobuf_hw_min;
15224 slot += hw_boost_delay;
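/*
 * Arm a new goodput (GP) measurement, picking the starting sequence and the
 * ack point that will end it, provided the connection state and the amount
 * of available data allow it.
 */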
15230 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack,
15231 tcp_seq startseq, uint32_t sb_offset)
15233 struct rack_sendmap *my_rsm = NULL;
15234 struct rack_sendmap fe;
15236 if (tp->t_state < TCPS_ESTABLISHED) {
15238 * We don't start any measurements if we are
15239 * not at least established.
15243 if (tp->t_state >= TCPS_FIN_WAIT_1) {
15245 * We will get no more data into the SB;
15246 * this means we need to have the data available
15247 * before we start a measurement.
15250 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) <
15251 max(rc_init_window(rack),
15252 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) {
15253 /* Nope not enough data */
15257 tp->t_flags |= TF_GPUTINPROG;
15258 rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
15259 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
15260 tp->gput_seq = startseq;
15261 rack->app_limited_needs_set = 0;
15262 if (rack->in_probe_rtt)
15263 rack->measure_saw_probe_rtt = 1;
15264 else if ((rack->measure_saw_probe_rtt) &&
15265 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
15266 rack->measure_saw_probe_rtt = 0;
15267 if (rack->rc_gp_filled)
15268 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
15270 /* Special case initial measurement */
15273 tp->gput_ts = tcp_get_usecs(&tv);
15274 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
15277 * We take a guess out into the future:
15278 * if we have no measurement and no
15279 * initial rate, we measure the first
15280 * initial-window's worth of data to
15281 * speed up getting some GP measurement and
15282 * thus start pacing.
15284 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) {
15285 rack->app_limited_needs_set = 1;
15286 tp->gput_ack = startseq + max(rc_init_window(rack),
15287 (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
15288 rack_log_pacing_delay_calc(rack,
15293 rack->r_ctl.rc_app_limited_cnt,
15295 __LINE__, NULL, 0);
15300 * We are out somewhere in the SB;
15301 * can we use the already outstanding data?
15303 if (rack->r_ctl.rc_app_limited_cnt == 0) {
15305 * Yes, the first one is good and in this case
15306 * the tp->gput_ts is correctly set based on
15307 * the last ack that arrived (no need to
15308 * set things up when an ack comes in).
15310 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
15311 if ((my_rsm == NULL) ||
15312 (my_rsm->r_rtr_cnt != 1)) {
15313 /* retransmission? */
15317 if (rack->r_ctl.rc_first_appl == NULL) {
15319 * If rc_first_appl is NULL
15320 * then the cnt should be 0.
15321 * This is probably an error, maybe
15322 * a KASSERT would be appropriate.
15327 * If we have a marker pointer to the last one that is
15328 * app limited we can use that, but we need to set
15329 * things up so that when it gets ack'ed we record
15330 * the ack time (if it's not already acked).
15332 rack->app_limited_needs_set = 1;
15334 * We want to get to the rsm that is either
15335 * next with space i.e. over 1 MSS or the one
15336 * after that (after the app-limited).
15338 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
15339 rack->r_ctl.rc_first_appl);
15341 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp))
15342 /* Have to use the next one */
15343 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
15346 /* Use after the first MSS of it is acked */
15347 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp);
15351 if ((my_rsm == NULL) ||
15352 (my_rsm->r_rtr_cnt != 1)) {
15354 * Either it's a retransmit or
15355 * the last is the app-limited one.
15360 tp->gput_seq = my_rsm->r_start;
15362 if (my_rsm->r_flags & RACK_ACKED) {
15364 * This one has been acked; use the arrival ack time
15366 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
15367 rack->app_limited_needs_set = 0;
15369 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)];
15370 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
15371 rack_log_pacing_delay_calc(rack,
15376 rack->r_ctl.rc_app_limited_cnt,
15378 __LINE__, NULL, 0);
15384 * We don't know how long we may have been
15385 * idle or if this is the first send. Let's
15386 * set up the flag so we will trim off
15387 * the first ack'd data so we get a true
15390 rack->app_limited_needs_set = 1;
15391 tp->gput_ack = startseq + rack_get_measure_window(tp, rack);
15392 /* Find this guy so we can pull the send time */
15393 fe.r_start = startseq;
15394 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
15396 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)];
15397 if (my_rsm->r_flags & RACK_ACKED) {
15399 * Unlikely since it's probably what was
15400 * just transmitted (but I am paranoid).
15402 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
15403 rack->app_limited_needs_set = 0;
15405 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) {
15406 /* This also is unlikely */
15407 tp->gput_seq = my_rsm->r_start;
15411 * TSNH unless we have some send-map limit,
15412 * and even at that it should not be hitting
15413 * that limit (we should have stopped sending).
15418 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
15420 rack_log_pacing_delay_calc(rack,
15425 rack->r_ctl.rc_app_limited_cnt,
15426 9, __LINE__, NULL, 0);
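/*
 * How much new data can we send right now? Limited by the send window
 * (the lesser of cwnd and the peer's receive window), what is already in
 * flight, and what actually remains in the socket buffer.
 */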
15429 static inline uint32_t
15430 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use,
15431 uint32_t avail, int32_t sb_offset)
15436 if (tp->snd_wnd > cwnd_to_use)
15437 sendwin = cwnd_to_use;
15439 sendwin = tp->snd_wnd;
15440 if (ctf_outstanding(tp) >= tp->snd_wnd) {
15441 /* We never want to go over our peer's rcv-window */
15446 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
15447 if (flight >= sendwin) {
15449 * We have in flight what we are allowed by cwnd (if
15450 * it was rwnd blocking it would have hit above out
15455 len = sendwin - flight;
15456 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) {
15457 /* We would send too much (beyond the rwnd) */
15458 len = tp->snd_wnd - ctf_outstanding(tp);
15460 if ((len + sb_offset) > avail) {
15462 * We don't have that much in the SB, how much is
15465 len = avail - sb_offset;
15472 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags,
15473 unsigned ipoptlen, int32_t orig_len, int32_t len, int error,
15474 int rsm_is_null, int optlen, int line, uint16_t mode)
15476 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
15477 union tcp_log_stackspecific log;
15480 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
15481 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
15482 log.u_bbr.flex1 = error;
15483 log.u_bbr.flex2 = flags;
15484 log.u_bbr.flex3 = rsm_is_null;
15485 log.u_bbr.flex4 = ipoptlen;
15486 log.u_bbr.flex5 = tp->rcv_numsacks;
15487 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
15488 log.u_bbr.flex7 = optlen;
15489 log.u_bbr.flex8 = rack->r_fsb_inited;
15490 log.u_bbr.applimited = rack->r_fast_output;
15491 log.u_bbr.bw_inuse = rack_get_bw(rack);
15492 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
15493 log.u_bbr.cwnd_gain = mode;
15494 log.u_bbr.pkts_out = orig_len;
15495 log.u_bbr.lt_epoch = len;
15496 log.u_bbr.delivered = line;
15497 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
15498 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
15499 tcp_log_event_(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0,
15500 len, &log, false, NULL, NULL, 0, &tv);
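/*
 * Build an mbuf chain describing up to *plen bytes starting at the_off in
 * the_m, referencing (or copying) the data rather than consuming it, while
 * honoring the TSO segment limit/size and avoiding mixing TLS sessions.
 */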
15505 static struct mbuf *
15506 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen,
15507 struct rack_fast_send_blk *fsb,
15508 int32_t seglimit, int32_t segsize, int hw_tls)
15511 struct ktls_session *tls, *ntls;
15513 struct mbuf *start;
15516 struct mbuf *m, *n, **np, *smb;
15519 int32_t len = *plen;
15521 int32_t len_cp = 0;
15522 uint32_t mlen, frags;
15524 soff = off = the_off;
15529 if (hw_tls && (m->m_flags & M_EXTPG))
15530 tls = m->m_epg_tls;
15544 if (m->m_flags & M_EXTPG)
15545 ntls = m->m_epg_tls;
15550 * Avoid mixing TLS records with handshake
15551 * data or TLS records from different
15561 mlen = min(len, m->m_len - off);
15564 * For M_EXTPG mbufs, add 3 segments
15565 * + 1 in case we are crossing page boundaries
15566 * + 2 in case the TLS hdr/trailer are used
15567 * It is cheaper to just add the segments
15568 * than it is to take the cache miss to look
15569 * at the mbuf ext_pgs state in detail.
15571 if (m->m_flags & M_EXTPG) {
15572 fragsize = min(segsize, PAGE_SIZE);
15575 fragsize = segsize;
15579 /* Break if we really can't fit anymore. */
15580 if ((frags + 1) >= seglimit) {
15586 * Reduce size if you can't copy the whole
15587 * mbuf. If we can't copy the whole mbuf, also
15588 * adjust len so the loop will end after this
15591 if ((frags + howmany(mlen, fragsize)) >= seglimit) {
15592 mlen = (seglimit - frags - 1) * fragsize;
15594 *plen = len_cp + len;
15596 frags += howmany(mlen, fragsize);
15600 KASSERT(seglimit > 0,
15601 ("%s: seglimit went too low", __func__));
15603 n = m_get(M_NOWAIT, m->m_type);
15609 len_cp += n->m_len;
15610 if (m->m_flags & (M_EXT|M_EXTPG)) {
15611 n->m_data = m->m_data + off;
15614 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
15621 if (len || (soff == smb->m_len)) {
15623 * We have more, so we move forward, or
15624 * we have consumed the entire mbuf and
15625 * len has fallen to 0.
15637 * Save off the size of the mbuf. We do
15638 * this so that we can recognize when it
15639 * has been trimmed by sbcut() as acks
15642 fsb->o_m_len = smb->m_len;
15645 * This is the case where the next mbuf went to NULL. This
15646 * means with this copy we have sent everything in the sb.
15647 * In theory we could clear the fast_output flag, but let's
15648 * not, since it's possible that we could get more added
15649 * and acks that call the extend function which would let
15664 * This is a copy of m_copym(), taking the TSO segment size/limit
15665 * constraints into account, and advancing the sndptr as it goes.
15667 static struct mbuf *
15668 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen,
15669 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff)
15671 struct mbuf *m, *n;
15674 soff = rack->r_ctl.fsb.off;
15675 m = rack->r_ctl.fsb.m;
15676 if (rack->r_ctl.fsb.o_m_len > m->m_len) {
15678 * The mbuf had the front of it chopped off by an ack;
15679 * we need to adjust the soff/off by that difference.
15683 delta = rack->r_ctl.fsb.o_m_len - m->m_len;
15685 } else if (rack->r_ctl.fsb.o_m_len < m->m_len) {
15687 * The mbuf was expanded, probably by
15688 * an m_compress. Just update o_m_len.
15690 rack->r_ctl.fsb.o_m_len = m->m_len;
15692 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff));
15693 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen));
15694 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?",
15696 rack, *plen, m, m->m_len));
15697 /* Save off the right location before we copy and advance */
15699 *s_mb = rack->r_ctl.fsb.m;
15700 n = rack_fo_base_copym(m, soff, plen,
15702 seglimit, segsize, rack->r_ctl.fsb.hw_tls);
15707 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
15708 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp)
15711 * Enter the fast retransmit path. We are given that a sched_pin is
15712 * in place (if accounting is compiled in) and the cycle count taken
15713 * at the entry is in ts_val. The concept here is that the rsm
15714 * now holds the mbuf offsets and such so we can directly transmit
15715 * without a lot of overhead; the len field is already set for
15716 * us to prohibit us from sending too much (usually it's 1 MSS).
15718 struct ip *ip = NULL;
15719 struct udphdr *udp = NULL;
15720 struct tcphdr *th = NULL;
15721 struct mbuf *m = NULL;
15724 struct tcp_log_buffer *lgb;
15725 #ifdef TCP_ACCOUNTING
15730 u_char opt[TCP_MAXOLEN];
15731 uint32_t hdrlen, optlen;
15732 int32_t slot, segsiz, max_val, tso = 0, error, ulen = 0;
15734 uint32_t if_hw_tsomaxsegcount = 0, startseq;
15735 uint32_t if_hw_tsomaxsegsize;
15738 struct ip6_hdr *ip6 = NULL;
15740 if (rack->r_is_v6) {
15741 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
15742 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
15746 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
15747 hdrlen = sizeof(struct tcpiphdr);
15749 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
15753 /* It's a TLP; add the flag (it may already be there, but be sure) */
15754 rsm->r_flags |= RACK_TLP;
15756 /* If it was a TLP it is not on this retransmit */
15757 rsm->r_flags &= ~RACK_TLP;
15759 startseq = rsm->r_start;
15760 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
15761 inp = rack->rc_inp;
15763 flags = tcp_outflags[tp->t_state];
15764 if (flags & (TH_SYN|TH_RST)) {
15767 if (rsm->r_flags & RACK_HAS_FIN) {
15768 /* We can't send a FIN here */
15771 if (flags & TH_FIN) {
15772 /* We never send a FIN */
15775 if (tp->t_flags & TF_RCVD_TSTMP) {
15776 to.to_tsval = ms_cts + tp->ts_offset;
15777 to.to_tsecr = tp->ts_recent;
15778 to.to_flags = TOF_TS;
15780 optlen = tcp_addoptions(&to, opt);
15782 udp = rack->r_ctl.fsb.udp;
15784 hdrlen += sizeof(struct udphdr);
15785 if (rack->r_ctl.rc_pace_max_segs)
15786 max_val = rack->r_ctl.rc_pace_max_segs;
15787 else if (rack->rc_user_set_max_segs)
15788 max_val = rack->rc_user_set_max_segs * segsiz;
15791 if ((tp->t_flags & TF_TSO) &&
15797 if (MHLEN < hdrlen + max_linkhdr)
15798 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
15801 m = m_gethdr(M_NOWAIT, MT_DATA);
15804 m->m_data += max_linkhdr;
15806 th = rack->r_ctl.fsb.th;
15807 /* Establish the len to send */
15810 if ((tso) && (len + optlen > tp->t_maxseg)) {
15811 uint32_t if_hw_tsomax;
15814 /* extract TSO information */
15815 if_hw_tsomax = tp->t_tsomax;
15816 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
15817 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
15819 * Check if we should limit by maximum payload
15822 if (if_hw_tsomax != 0) {
15823 /* compute maximum TSO length */
15824 max_len = (if_hw_tsomax - hdrlen -
15826 if (max_len <= 0) {
15828 } else if (len > max_len) {
15832 if (len <= segsiz) {
15834 * In case there are too many small fragments don't
15842 if ((tso == 0) && (len > segsiz))
15845 (len <= MHLEN - hdrlen - max_linkhdr)) {
15848 th->th_seq = htonl(rsm->r_start);
15849 th->th_ack = htonl(tp->rcv_nxt);
15851 * The PUSH bit should only be applied
15852 * if the full retransmission is made. If
15853 * we are sending less, then this is the
15854 * left-hand edge and it should not have
15857 if ((rsm->r_flags & RACK_HAD_PUSH) &&
15858 (len == (rsm->r_end - rsm->r_start)))
15860 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale));
15861 if (th->th_win == 0) {
15862 tp->t_sndzerowin++;
15863 tp->t_flags |= TF_RXWIN0SENT;
15865 tp->t_flags &= ~TF_RXWIN0SENT;
15866 if (rsm->r_flags & RACK_TLP) {
15868 * TLP should not count in retran count, but
15871 counter_u64_add(rack_tlp_retran, 1);
15872 counter_u64_add(rack_tlp_retran_bytes, len);
15874 tp->t_sndrexmitpack++;
15875 KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
15876 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
15879 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
15882 if (rsm->m == NULL)
15884 if (rsm->orig_m_len != rsm->m->m_len) {
15885 /* Fix up the orig_m_len and possibly the mbuf offset */
15886 rack_adjust_orig_mlen(rsm);
15888 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls);
15889 if (len <= segsiz) {
15891 * Must have run out of mbufs for the copy;
15892 * shorten it to no longer need TSO. Let's
15893 * not put on sendalot since we are low on
15898 if ((m->m_next == NULL) || (len <= 0)){
15903 ulen = hdrlen + len - sizeof(struct ip6_hdr);
15905 ulen = hdrlen + len - sizeof(struct ip);
15906 udp->uh_ulen = htons(ulen);
15908 m->m_pkthdr.rcvif = (struct ifnet *)0;
15909 if (TCPS_HAVERCVDSYN(tp->t_state) &&
15910 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
15911 int ect = tcp_ecn_output_established(tp, &flags, len, true);
15912 if ((tp->t_state == TCPS_SYN_RECEIVED) &&
15913 (tp->t_flags2 & TF2_ECN_SND_ECE))
15914 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
15916 if (rack->r_is_v6) {
15917 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
15918 ip6->ip6_flow |= htonl(ect << 20);
15923 ip->ip_tos &= ~IPTOS_ECN_MASK;
15927 tcp_set_flags(th, flags);
15928 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
15930 if (rack->r_is_v6) {
15932 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
15933 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
15934 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
15935 th->th_sum = htons(0);
15936 UDPSTAT_INC(udps_opackets);
15938 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
15939 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
15940 th->th_sum = in6_cksum_pseudo(ip6,
15941 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
15946 #if defined(INET6) && defined(INET)
15952 m->m_pkthdr.csum_flags = CSUM_UDP;
15953 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
15954 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
15955 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
15956 th->th_sum = htons(0);
15957 UDPSTAT_INC(udps_opackets);
15959 m->m_pkthdr.csum_flags = CSUM_TCP;
15960 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
15961 th->th_sum = in_pseudo(ip->ip_src.s_addr,
15962 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
15963 IPPROTO_TCP + len + optlen));
15965 /* IP version must be set here for ipv4/ipv6 checking later */
15966 KASSERT(ip->ip_v == IPVERSION,
15967 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
15971 KASSERT(len > tp->t_maxseg - optlen,
15972 ("%s: len <= tso_segsz tp:%p", __func__, tp));
15973 m->m_pkthdr.csum_flags |= CSUM_TSO;
15974 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
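/* Let the NIC segment this burst; each TSO segment carries t_maxseg minus option bytes of payload. */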
15977 if (rack->r_is_v6) {
15978 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit;
15979 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
15980 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
15981 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
15983 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
15986 #if defined(INET) && defined(INET6)
15991 ip->ip_len = htons(m->m_pkthdr.len);
15992 ip->ip_ttl = rack->r_ctl.fsb.hoplimit;
15993 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
15994 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
15995 if (tp->t_port == 0 || len < V_tcp_minmss) {
15996 ip->ip_off |= htons(IP_DF);
15999 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
16003 /* Time to copy in our header */
16004 cpto = mtod(m, uint8_t *);
16005 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
16006 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
16008 bcopy(opt, th + 1, optlen);
16009 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
16011 th->th_off = sizeof(struct tcphdr) >> 2;
16013 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
16014 union tcp_log_stackspecific log;
16016 if (rsm->r_flags & RACK_RWND_COLLAPSED) {
16017 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm);
16018 counter_u64_add(rack_collapsed_win_rxt, 1);
16019 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start));
16021 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
16022 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
16023 if (rack->rack_no_prr)
16024 log.u_bbr.flex1 = 0;
16026 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
16027 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
16028 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
16029 log.u_bbr.flex4 = max_val;
16030 log.u_bbr.flex5 = 0;
16031 /* Save off the early/late values */
16032 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
16033 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
16034 log.u_bbr.bw_inuse = rack_get_bw(rack);
16035 if (doing_tlp == 0)
16036 log.u_bbr.flex8 = 1;
16038 log.u_bbr.flex8 = 2;
16039 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
16040 log.u_bbr.flex7 = 55;
16041 log.u_bbr.pkts_out = tp->t_maxseg;
16042 log.u_bbr.timeStamp = cts;
16043 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
16044 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use;
16045 log.u_bbr.delivered = 0;
16046 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
16047 len, &log, false, NULL, NULL, 0, tv);
16051 if (rack->r_is_v6) {
16052 error = ip6_output(m, NULL,
16054 0, NULL, NULL, inp);
16057 #if defined(INET) && defined(INET6)
16062 error = ip_output(m, NULL,
16069 lgb->tlb_errno = error;
16075 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv),
16076 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls);
16077 if (doing_tlp && (rack->fast_rsm_hack == 0)) {
16078 rack->rc_tlp_in_progress = 1;
16079 rack->r_ctl.rc_tlp_cnt_out++;
16082 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls);
16084 rack->rc_last_sent_tlp_past_cumack = 0;
16085 rack->rc_last_sent_tlp_seq_valid = 1;
16086 rack->r_ctl.last_sent_tlp_seq = rsm->r_start;
16087 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start;
16090 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
16091 rack->forced_ack = 0; /* If we send something zap the FA flag */
16092 if (IN_FASTRECOVERY(tp->t_flags) && rsm)
16093 rack->r_ctl.retran_during_recovery += len;
16097 idx = (len / segsiz) + 3;
16098 if (idx >= TCP_MSS_ACCT_ATIMER)
16099 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
16101 counter_u64_add(rack_out_size[idx], 1);
16103 if (tp->t_rtttime == 0) {
16104 tp->t_rtttime = ticks;
16105 tp->t_rtseq = startseq;
16106 KMOD_TCPSTAT_INC(tcps_segstimed);
16108 counter_u64_add(rack_fto_rsm_send, 1);
16109 if (error && (error == ENOBUFS)) {
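/*
 * ENOBUFS back-off: pace the next send out by at least 10ms, growing with
 * the consecutive-ENOBUFS count.
 */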
16110 if (rack->r_ctl.crte != NULL) {
16111 rack_trace_point(rack, RACK_TP_HWENOBUF);
16113 rack_trace_point(rack, RACK_TP_ENOBUF);
16114 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
16115 if (rack->rc_enobuf < 0x7f)
16117 if (slot < (10 * HPTS_USEC_IN_MSEC))
16118 slot = 10 * HPTS_USEC_IN_MSEC;
16120 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz);
16122 (rack->rc_always_pace == 0) ||
16123 (rack->r_rr_config == 1)) {
16125 * We have no pacing set or we
16126 * are using old-style rack or
16127 * we are overridden to use the old 1ms pacing.
16129 slot = rack->r_ctl.rc_min_to;
16131 rack_start_hpts_timer(rack, tp, cts, slot, len, 0);
16132 #ifdef TCP_ACCOUNTING
16133 crtsc = get_cyclecount();
16134 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16135 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
16137 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru);
16138 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16139 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
16141 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val));
16142 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16143 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz);
16145 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((len + segsiz - 1) / segsiz));
16156 rack_sndbuf_autoscale(struct tcp_rack *rack)
16159 * Automatic sizing of send socket buffer. Often the send buffer
16160 * size is not optimally adjusted to the actual network conditions
16161 * at hand (delay bandwidth product). Setting the buffer size too
16162 * small limits throughput on links with high bandwidth and high
16163 * delay (e.g. trans-continental/oceanic links). Setting the
16164 * buffer size too big consumes too much real kernel memory,
16165 * especially with many connections on busy servers.
16167 * The criteria to step up the send buffer one notch are:
16168 * 1. receive window of remote host is larger than send buffer
16169 * (with a fudge factor of 5/4th);
16170 * 2. send buffer is filled to 7/8th with data (so we actually
16171 * have data to make use of it);
16172 * 3. send buffer fill has not hit maximal automatic size;
16173 * 4. our send window (slow start and congestion controlled) is
16174 * larger than sent but unacknowledged data in send buffer.
16176 * Note that the rack version moves things much faster since
16177 * we want to avoid hitting cache lines in the rack_fast_output()
16178 * path so this is called much less often and thus moves
16179 * the SB forward by a percentage.
16183 uint32_t sendwin, scaleup;
16186 so = rack->rc_inp->inp_socket;
16187 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd);
16188 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
16189 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
16190 sbused(&so->so_snd) >=
16191 (so->so_snd.sb_hiwat / 8 * 7) &&
16192 sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
16193 sendwin >= (sbused(&so->so_snd) -
16194 (tp->snd_nxt - tp->snd_una))) {
16195 if (rack_autosndbuf_inc)
16196 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100;
16198 scaleup = V_tcp_autosndbuf_inc;
16199 if (scaleup < V_tcp_autosndbuf_inc)
16200 scaleup = V_tcp_autosndbuf_inc;
16201 scaleup += so->so_snd.sb_hiwat;
16202 if (scaleup > V_tcp_autosndbuf_max)
16203 scaleup = V_tcp_autosndbuf_max;
16204 if (!sbreserve_locked(so, SO_SND, scaleup, curthread))
16205 so->so_snd.sb_flags &= ~SB_AUTOSIZE;
16211 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val,
16212 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err)
16215 * Enter to do fast output. We are given that the sched_pin is
16216 * in place (if accounting is compiled in) and the cycle count taken
16217 * at entry is in place in ts_val. The idea here is that
16218 * we know how many more bytes need to be sent (presumably either
16219 * during pacing or to fill the cwnd and that was greater than
16220 * the max-burst). We have how much to send and all the info we
16221 * need to just send.
16223 struct ip *ip = NULL;
16224 struct udphdr *udp = NULL;
16225 struct tcphdr *th = NULL;
16226 struct mbuf *m, *s_mb;
16229 struct tcp_log_buffer *lgb;
16230 #ifdef TCP_ACCOUNTING
16234 u_char opt[TCP_MAXOLEN];
16235 uint32_t hdrlen, optlen;
16236 #ifdef TCP_ACCOUNTING
16239 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0;
16242 uint32_t if_hw_tsomaxsegcount = 0, startseq;
16243 uint32_t if_hw_tsomaxsegsize;
16244 uint16_t add_flag = RACK_SENT_FP;
16246 struct ip6_hdr *ip6 = NULL;
16248 if (rack->r_is_v6) {
16249 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
16250 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
16254 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
16255 hdrlen = sizeof(struct tcpiphdr);
16257 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
16261 startseq = tp->snd_max;
16262 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
16263 inp = rack->rc_inp;
16264 len = rack->r_ctl.fsb.left_to_send;
16266 flags = rack->r_ctl.fsb.tcp_flags;
16267 if (tp->t_flags & TF_RCVD_TSTMP) {
16268 to.to_tsval = ms_cts + tp->ts_offset;
16269 to.to_tsecr = tp->ts_recent;
16270 to.to_flags = TOF_TS;
16272 optlen = tcp_addoptions(&to, opt);
16274 udp = rack->r_ctl.fsb.udp;
16276 hdrlen += sizeof(struct udphdr);
16277 if (rack->r_ctl.rc_pace_max_segs)
16278 max_val = rack->r_ctl.rc_pace_max_segs;
16279 else if (rack->rc_user_set_max_segs)
16280 max_val = rack->rc_user_set_max_segs * segsiz;
16283 if ((tp->t_flags & TF_TSO) &&
16290 if (MHLEN < hdrlen + max_linkhdr)
16291 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
16294 m = m_gethdr(M_NOWAIT, MT_DATA);
16297 m->m_data += max_linkhdr;
16299 th = rack->r_ctl.fsb.th;
16300 /* Establish the len to send */
16303 if ((tso) && (len + optlen > tp->t_maxseg)) {
16304 uint32_t if_hw_tsomax;
16307 /* extract TSO information */
16308 if_hw_tsomax = tp->t_tsomax;
16309 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
16310 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
16312 * Check if we should limit by maximum payload
16315 if (if_hw_tsomax != 0) {
16316 /* compute maximum TSO length */
16317 max_len = (if_hw_tsomax - hdrlen -
16319 if (max_len <= 0) {
16321 } else if (len > max_len) {
16325 if (len <= segsiz) {
16327 * In case there are too many small fragments don't
16335 if ((tso == 0) && (len > segsiz))
16338 (len <= MHLEN - hdrlen - max_linkhdr)) {
16341 sb_offset = tp->snd_max - tp->snd_una;
16342 th->th_seq = htonl(tp->snd_max);
16343 th->th_ack = htonl(tp->rcv_nxt);
16344 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale));
16345 if (th->th_win == 0) {
16346 tp->t_sndzerowin++;
16347 tp->t_flags |= TF_RXWIN0SENT;
16349 tp->t_flags &= ~TF_RXWIN0SENT;
16350 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */
16351 KMOD_TCPSTAT_INC(tcps_sndpack);
16352 KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
16354 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
16357 if (rack->r_ctl.fsb.m == NULL)
16360 /* s_mb and s_soff are saved for rack_log_output */
16361 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize,
16363 if (len <= segsiz) {
16365 * Must have run out of mbufs for the copy;
16366 * shorten it to no longer need TSO. Let's
16367 * not put on sendalot since we are low on
16372 if (rack->r_ctl.fsb.rfo_apply_push &&
16373 (len == rack->r_ctl.fsb.left_to_send)) {
16375 add_flag |= RACK_HAD_PUSH;
16377 if ((m->m_next == NULL) || (len <= 0)){
16382 ulen = hdrlen + len - sizeof(struct ip6_hdr);
16384 ulen = hdrlen + len - sizeof(struct ip);
16385 udp->uh_ulen = htons(ulen);
16387 m->m_pkthdr.rcvif = (struct ifnet *)0;
16388 if (TCPS_HAVERCVDSYN(tp->t_state) &&
16389 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
16390 int ect = tcp_ecn_output_established(tp, &flags, len, false);
16391 if ((tp->t_state == TCPS_SYN_RECEIVED) &&
16392 (tp->t_flags2 & TF2_ECN_SND_ECE))
16393 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
16395 if (rack->r_is_v6) {
16396 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
16397 ip6->ip6_flow |= htonl(ect << 20);
16402 ip->ip_tos &= ~IPTOS_ECN_MASK;
16406 tcp_set_flags(th, flags);
16407 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
16409 if (rack->r_is_v6) {
16411 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
16412 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
16413 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
16414 th->th_sum = htons(0);
16415 UDPSTAT_INC(udps_opackets);
16417 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
16418 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
16419 th->th_sum = in6_cksum_pseudo(ip6,
16420 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
16425 #if defined(INET6) && defined(INET)
16431 m->m_pkthdr.csum_flags = CSUM_UDP;
16432 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
16433 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
16434 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
16435 th->th_sum = htons(0);
16436 UDPSTAT_INC(udps_opackets);
16438 m->m_pkthdr.csum_flags = CSUM_TCP;
16439 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
16440 th->th_sum = in_pseudo(ip->ip_src.s_addr,
16441 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
16442 IPPROTO_TCP + len + optlen));
16444 /* IP version must be set here for ipv4/ipv6 checking later */
16445 KASSERT(ip->ip_v == IPVERSION,
16446 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
16450 KASSERT(len > tp->t_maxseg - optlen,
16451 ("%s: len <= tso_segsz tp:%p", __func__, tp));
16452 m->m_pkthdr.csum_flags |= CSUM_TSO;
16453 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
16456 if (rack->r_is_v6) {
16457 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit;
16458 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
16459 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
16460 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
16462 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
16465 #if defined(INET) && defined(INET6)
16470 ip->ip_len = htons(m->m_pkthdr.len);
16471 ip->ip_ttl = rack->r_ctl.fsb.hoplimit;
16472 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
16473 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
16474 if (tp->t_port == 0 || len < V_tcp_minmss) {
16475 ip->ip_off |= htons(IP_DF);
16478 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
16482 /* Time to copy in our header */
16483 cpto = mtod(m, uint8_t *);
16484 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
16485 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
16487 bcopy(opt, th + 1, optlen);
16488 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
16490 th->th_off = sizeof(struct tcphdr) >> 2;
16492 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
16493 union tcp_log_stackspecific log;
16495 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
16496 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
16497 if (rack->rack_no_prr)
16498 log.u_bbr.flex1 = 0;
16500 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
16501 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
16502 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
16503 log.u_bbr.flex4 = max_val;
16504 log.u_bbr.flex5 = 0;
16505 /* Save off the early/late values */
16506 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
16507 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
16508 log.u_bbr.bw_inuse = rack_get_bw(rack);
16509 log.u_bbr.flex8 = 0;
16510 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
16511 log.u_bbr.flex7 = 44;
16512 log.u_bbr.pkts_out = tp->t_maxseg;
16513 log.u_bbr.timeStamp = cts;
16514 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
16515 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use;
16516 log.u_bbr.delivered = 0;
16517 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
16518 len, &log, false, NULL, NULL, 0, tv);
16522 if (rack->r_is_v6) {
16523 error = ip6_output(m, NULL,
16525 0, NULL, NULL, inp);
16528 #if defined(INET) && defined(INET6)
16533 error = ip_output(m, NULL,
16539 lgb->tlb_errno = error;
16547 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv),
16548 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls);
16550 if (tp->snd_una == tp->snd_max) {
16551 rack->r_ctl.rc_tlp_rxt_last_time = cts;
16552 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
16553 tp->t_acktime = ticks;
16556 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls);
16558 rack->forced_ack = 0; /* If we send something zap the FA flag */
16560 if ((tp->t_flags & TF_GPUTINPROG) == 0)
16561 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset);
16562 tp->snd_max += len;
16563 tp->snd_nxt = tp->snd_max;
16567 idx = (len / segsiz) + 3;
16568 if (idx >= TCP_MSS_ACCT_ATIMER)
16569 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
16571 counter_u64_add(rack_out_size[idx], 1);
16573 if (len <= rack->r_ctl.fsb.left_to_send)
16574 rack->r_ctl.fsb.left_to_send -= len;
16576 rack->r_ctl.fsb.left_to_send = 0;
16577 if (rack->r_ctl.fsb.left_to_send < segsiz) {
16578 rack->r_fast_output = 0;
16579 rack->r_ctl.fsb.left_to_send = 0;
16580 /* At the end of fast_output scale up the sb */
16581 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd);
16582 rack_sndbuf_autoscale(rack);
16583 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd);
16585 if (tp->t_rtttime == 0) {
16586 tp->t_rtttime = ticks;
16587 tp->t_rtseq = startseq;
16588 KMOD_TCPSTAT_INC(tcps_segstimed);
16590 if ((rack->r_ctl.fsb.left_to_send >= segsiz) &&
16595 th = rack->r_ctl.fsb.th;
16596 #ifdef TCP_ACCOUNTING
16601 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
16602 counter_u64_add(rack_fto_send, 1);
16603 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz);
16604 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0);
16605 #ifdef TCP_ACCOUNTING
16606 crtsc = get_cyclecount();
16607 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16608 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
16610 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru);
16611 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16612 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
16614 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val));
16615 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16616 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz);
16618 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len + segsiz - 1) / segsiz));
16625 rack->r_fast_output = 0;
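/*
 * Find the segment at the last collapse point and decide whether it may be
 * retransmitted now (the peer's window has re-opened and enough time has
 * passed); returns the rsm to send, or NULL if nothing is ready.
 */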
16629 static struct rack_sendmap *
16630 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts)
16632 struct rack_sendmap *rsm = NULL;
16633 struct rack_sendmap fe;
16637 fe.r_start = rack->r_ctl.last_collapse_point;
16638 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
16639 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) {
16640 /* Nothing there, which is strange; turn off validity */
16641 rack->r_collapse_point_valid = 0;
16644 /* Can we send it yet? */
16645 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) {
16647 * Receiver window has not grown enough for
16648 * the segment to be put on the wire.
16652 if (rsm->r_flags & RACK_ACKED) {
16654 * It has been sacked; let's move to the
16655 * next one if possible.
16657 rack->r_ctl.last_collapse_point = rsm->r_end;
16659 if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
16660 rack->r_ctl.high_collapse_point)) {
16661 rack->r_collapse_point_valid = 0;
16666 /* Now, has it been long enough? */
16667 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts);
16668 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) {
16669 rack_log_collapse(rack, rsm->r_start,
16670 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])),
16671 thresh, __LINE__, 6, rsm->r_flags, rsm);
16674 /* Not enough time */
16675 rack_log_collapse(rack, rsm->r_start,
16676 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])),
16677 thresh, __LINE__, 7, rsm->r_flags, rsm);
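/*
 * The main output path for the rack stack: run any timers, account for
 * pacing early/late, try the fast output paths and otherwise fall through
 * to the full send path.
 */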
16682 rack_output(struct tcpcb *tp)
16686 uint32_t sb_offset, s_moff = 0;
16687 int32_t len, error = 0;
16689 struct mbuf *m, *s_mb = NULL;
16691 uint32_t if_hw_tsomaxsegcount = 0;
16692 uint32_t if_hw_tsomaxsegsize;
16693 int32_t segsiz, minseg;
16694 long tot_len_this_send = 0;
16696 struct ip *ip = NULL;
16698 struct udphdr *udp = NULL;
16699 struct tcp_rack *rack;
16703 uint8_t wanted_cookie = 0;
16704 u_char opt[TCP_MAXOLEN];
16705 unsigned ipoptlen, optlen, hdrlen, ulen=0;
16708 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
16709 unsigned ipsec_optlen = 0;
16712 int32_t idle, sendalot;
16713 int32_t sub_from_prr = 0;
16714 volatile int32_t sack_rxmit;
16715 struct rack_sendmap *rsm = NULL;
16719 int32_t sup_rack = 0;
16720 uint32_t cts, ms_cts, delayed, early;
16721 uint16_t add_flag = RACK_SENT_SP;
16722 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */
16723 uint8_t hpts_calling, doing_tlp = 0;
16724 uint32_t cwnd_to_use, pace_max_seg;
16725 int32_t do_a_prefetch = 0;
16726 int32_t prefetch_rsm = 0;
16727 int32_t orig_len = 0;
16729 int32_t prefetch_so_done = 0;
16730 struct tcp_log_buffer *lgb;
16732 struct sockbuf *sb;
16733 uint64_t ts_val = 0;
16734 #ifdef TCP_ACCOUNTING
16738 struct ip6_hdr *ip6 = NULL;
16741 bool hw_tls = false;
16743 /* set up and take the cache hits here */
16744 rack = (struct tcp_rack *)tp->t_fb_ptr;
16745 #ifdef TCP_ACCOUNTING
16747 ts_val = get_cyclecount();
16749 hpts_calling = rack->rc_inp->inp_hpts_calls;
16750 NET_EPOCH_ASSERT();
16751 INP_WLOCK_ASSERT(rack->rc_inp);
16753 if (tp->t_flags & TF_TOE) {
16754 #ifdef TCP_ACCOUNTING
16757 return (tcp_offload_output(tp));
16761 * For TFO connections in SYN_RECEIVED, only allow the initial
16762 * SYN|ACK and those sent by the retransmit timer.
16764 if (IS_FASTOPEN(tp->t_flags) &&
16765 (tp->t_state == TCPS_SYN_RECEIVED) &&
16766 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */
16767 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */
16768 #ifdef TCP_ACCOUNTING
16774 if (rack->r_state) {
16775 /* Use the cache line loaded if possible */
16776 isipv6 = rack->r_is_v6;
16778 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0;
16782 cts = tcp_get_usecs(&tv);
16783 ms_cts = tcp_tv_to_mssectick(&tv);
16784 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
16785 tcp_in_hpts(rack->rc_inp)) {
16787 * We are on the hpts for some timer but not hptsi output.
16788 * Remove from the hpts unconditionally.
16790 rack_timer_cancel(tp, rack, cts, __LINE__);
16792 /* Are we pacing and late? */
16793 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
16794 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) {
16795 /* We are delayed */
16796 delayed = cts - rack->r_ctl.rc_last_output_to;
16800 /* Do the timers, which may override the pacer */
16801 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
16804 retval = rack_process_timers(tp, rack, cts, hpts_calling,
16807 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1);
16808 #ifdef TCP_ACCOUNTING
16812 * If timers want tcp_drop(), then pass error out,
16813 * otherwise suppress it.
16815 return (retval < 0 ? retval : 0);
16818 if (rack->rc_in_persist) {
16819 if (tcp_in_hpts(rack->rc_inp) == 0) {
16820 /* Timer is not running */
16821 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
16823 #ifdef TCP_ACCOUNTING
16828 if ((rack->rc_ack_required == 1) &&
16829 (rack->r_timer_override == 0)){
16830 /* A timeout occurred and no ack has arrived */
16831 if (tcp_in_hpts(rack->rc_inp) == 0) {
16832 /* Timer is not running */
16833 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
16835 #ifdef TCP_ACCOUNTING
16840 if ((rack->r_timer_override) ||
16841 (rack->rc_ack_can_sendout_data) ||
16843 (tp->t_state < TCPS_ESTABLISHED)) {
16844 rack->rc_ack_can_sendout_data = 0;
16845 if (tcp_in_hpts(rack->rc_inp))
16846 tcp_hpts_remove(rack->rc_inp);
16847 } else if (tcp_in_hpts(rack->rc_inp)) {
16849 * We are on the hpts and cannot send now even if ACKNOW is on; we will
16850 * send when the hpts fires.
16852 #ifdef TCP_ACCOUNTING
16853 crtsc = get_cyclecount();
16854 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16855 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val);
16857 counter_u64_add(tcp_proc_time[SND_BLOCKED], (crtsc - ts_val));
16858 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16859 tp->tcp_cnt_counters[SND_BLOCKED]++;
16861 counter_u64_add(tcp_cnt_counters[SND_BLOCKED], 1);
16864 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1);
16867 rack->rc_inp->inp_hpts_calls = 0;
16868 /* Finish out both pacing early and late accounting */
16869 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
16870 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) {
16871 early = rack->r_ctl.rc_last_output_to - cts;
16875 rack->r_ctl.rc_agg_delayed += delayed;
16877 } else if (early) {
16878 rack->r_ctl.rc_agg_early += early;
16881 /* Now that early/late accounting is done, turn off the flag */
16882 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
16883 rack->r_wanted_output = 0;
16884 rack->r_timer_override = 0;
16885 if ((tp->t_state != rack->r_state) &&
16886 TCPS_HAVEESTABLISHED(tp->t_state)) {
16887 rack_set_state(tp, rack);
16889 if ((rack->r_fast_output) &&
16890 (doing_tlp == 0) &&
16891 (tp->rcv_numsacks == 0)) {
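/* Try the fast-output path: it is only usable when no TLP is pending and there are no SACK blocks to send, so the pre-built header block can be reused as-is. */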
16895 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
16899 inp = rack->rc_inp;
16900 so = inp->inp_socket;
16905 inp = rack->rc_inp;
16907 * For TFO connections in SYN_SENT or SYN_RECEIVED,
16908 * only allow the initial SYN or SYN|ACK and those sent
16909 * by the retransmit timer.
16911 if (IS_FASTOPEN(tp->t_flags) &&
16912 ((tp->t_state == TCPS_SYN_RECEIVED) ||
16913 (tp->t_state == TCPS_SYN_SENT)) &&
16914 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */
16915 (tp->t_rxtshift == 0)) { /* not a retransmit */
16916 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
16917 so = inp->inp_socket;
16919 goto just_return_nolock;
16922 * Determine length of data that should be transmitted, and flags
16923 * that will be used. If there is some data or critical controls
16924 * (SYN, RST) to send, then transmit; otherwise, investigate
16927 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
16928 if (tp->t_idle_reduce) {
16929 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur))
16930 rack_cc_after_idle(rack, tp);
16932 tp->t_flags &= ~TF_LASTIDLE;
16934 if (tp->t_flags & TF_MORETOCOME) {
16935 tp->t_flags |= TF_LASTIDLE;
16939 if ((tp->snd_una == tp->snd_max) &&
16940 rack->r_ctl.rc_went_idle_time &&
16941 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) {
16942 idle = cts - rack->r_ctl.rc_went_idle_time;
16943 if (idle > rack_min_probertt_hold) {
16944 /* Count as a probe rtt */
16945 if (rack->in_probe_rtt == 0) {
16946 rack->r_ctl.rc_lower_rtt_us_cts = cts;
16947 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
16948 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
16949 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
16951 rack_exit_probertt(rack, cts);
16956 if (rack_use_fsb && (rack->r_fsb_inited == 0) && (rack->r_state != TCPS_CLOSED))
16957 rack_init_fsb_block(tp, rack);
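/* Lazily initialize the fast-send block the first time output runs with fsb use enabled and the connection not closed. */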
16960 * If we've recently taken a timeout, snd_max will be greater than
16961 * snd_nxt. There may be SACK information that allows us to avoid
16962 * resending already delivered data. Adjust snd_nxt accordingly.
16965 cts = tcp_get_usecs(&tv);
16966 ms_cts = tcp_tv_to_mssectick(&tv);
16969 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
16971 if (rack->r_ctl.rc_pace_max_segs == 0)
16972 pace_max_seg = rack->rc_user_set_max_segs * segsiz;
16974 pace_max_seg = rack->r_ctl.rc_pace_max_segs;
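/* pace_max_seg is the largest burst we will emit per pacer wakeup: the stack-computed maximum if set, otherwise the user-set segment count times the pacing segment size. */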
16975 sb_offset = tp->snd_max - tp->snd_una;
16976 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
16977 flags = tcp_outflags[tp->t_state];
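/* Top up the cache of free sendmap entries; if an allocation fails we bail out and, when called from the hpts, retry in roughly a millisecond. */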
16978 while (rack->rc_free_cnt < rack_free_cache) {
16979 rsm = rack_alloc(rack);
16981 if (inp->inp_hpts_calls)
16982 /* Retry in a ms */
16983 slot = (1 * HPTS_USEC_IN_MSEC);
16984 so = inp->inp_socket;
16986 goto just_return_nolock;
16988 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext);
16989 rack->rc_free_cnt++;
16992 if (inp->inp_hpts_calls)
16993 inp->inp_hpts_calls = 0;
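/*
 * Pick what to transmit, in priority order: an RST, the retransmit
 * timer's rsm, a collapsed-window retransmit, a RACK-detected loss,
 * a pending TLP, and finally any must-retransmit data left over from
 * an RTO or a path-MTU change.
 */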
16997 if (flags & TH_RST) {
16998 SOCKBUF_LOCK(&inp->inp_socket->so_snd);
16999 so = inp->inp_socket;
17003 if (rack->r_ctl.rc_resend) {
17004 /* Retransmit timer */
17005 rsm = rack->r_ctl.rc_resend;
17006 rack->r_ctl.rc_resend = NULL;
17007 len = rsm->r_end - rsm->r_start;
17010 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
17011 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
17012 __func__, __LINE__,
17013 rsm->r_start, tp->snd_una, tp, rack, rsm));
17014 sb_offset = rsm->r_start - tp->snd_una;
17017 } else if (rack->r_collapse_point_valid &&
17018 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) {
17020 * If an RSM is returned then enough time has passed
17021 * for us to retransmit it. Move up the collapse point,
17022 * since this rsm has its chance to retransmit now.
17024 rack_trace_point(rack, RACK_TP_COLLAPSED_RXT);
17025 rack->r_ctl.last_collapse_point = rsm->r_end;
17027 if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
17028 rack->r_ctl.high_collapse_point))
17029 rack->r_collapse_point_valid = 0;
17031 /* We are not doing a TLP */
17033 len = rsm->r_end - rsm->r_start;
17034 sb_offset = rsm->r_start - tp->snd_una;
17036 if ((rack->full_size_rxt == 0) &&
17037 (rack->shape_rxt_to_pacing_min == 0) &&
17040 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) {
17041 /* We have a retransmit that takes precedence */
17042 if ((!IN_FASTRECOVERY(tp->t_flags)) &&
17043 ((rsm->r_flags & RACK_MUST_RXT) == 0) &&
17044 ((tp->t_flags & TF_WASFRECOVERY) == 0)) {
17045 /* Enter recovery if not induced by a time-out */
17046 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
17049 if (SEQ_LT(rsm->r_start, tp->snd_una)) {
17050 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
17051 tp, rack, rsm, rsm->r_start, tp->snd_una);
17054 len = rsm->r_end - rsm->r_start;
17055 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
17056 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
17057 __func__, __LINE__,
17058 rsm->r_start, tp->snd_una, tp, rack, rsm));
17059 sb_offset = rsm->r_start - tp->snd_una;
17065 KMOD_TCPSTAT_INC(tcps_sack_rexmits);
17066 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
17069 } else if (rack->r_ctl.rc_tlpsend) {
17070 /* Tail loss probe */
17075 * Check if we can do a TLP with a RACK'd packet;
17076 * this can happen if we are not doing the rack
17077 * cheat and we skipped to a TLP and it
17080 rsm = rack->r_ctl.rc_tlpsend;
17081 /* We are doing a TLP; make sure the flag is present */
17082 rsm->r_flags |= RACK_TLP;
17083 rack->r_ctl.rc_tlpsend = NULL;
17085 tlen = rsm->r_end - rsm->r_start;
17088 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
17089 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
17090 __func__, __LINE__,
17091 rsm->r_start, tp->snd_una, tp, rack, rsm));
17092 sb_offset = rsm->r_start - tp->snd_una;
17093 cwin = min(tp->snd_wnd, tlen);
17096 if (rack->r_must_retran &&
17097 (doing_tlp == 0) &&
17098 (SEQ_GT(tp->snd_max, tp->snd_una)) &&
17101 * There are two different ways that we
17102 * can get into this block:
17103 * a) This is a non-sack connection, we had a time-out
17104 * and thus r_must_retran was set and everything
17105 * left outstanding has been marked for retransmit.
17106 * b) The MTU of the path shrank, so that everything
17107 * was marked to be retransmitted with the smaller
17108 * mtu and r_must_retran was set.
17110 * This means that we expect the sendmap (outstanding)
17111 * to all be marked must. We can use the tmap to
17115 int sendwin, flight;
17117 sendwin = min(tp->snd_wnd, tp->snd_cwnd);
17118 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto);
17119 if (flight >= sendwin) {
17121 * We can't send yet.
17123 so = inp->inp_socket;
17125 goto just_return_nolock;
17128 * This is the case a/b mentioned above. All
17129 * outstanding/not-acked should be marked.
17130 * We can use the tmap to find them.
17132 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
17135 rack->r_must_retran = 0;
17136 rack->r_ctl.rc_out_at_rto = 0;
17137 so = inp->inp_socket;
17139 goto just_return_nolock;
17141 if ((rsm->r_flags & RACK_MUST_RXT) == 0) {
17143 * The first one does not have the flag; did we collapse
17144 * further up in our list?
17146 rack->r_must_retran = 0;
17147 rack->r_ctl.rc_out_at_rto = 0;
17152 len = rsm->r_end - rsm->r_start;
17153 sb_offset = rsm->r_start - tp->snd_una;
17155 if ((rack->full_size_rxt == 0) &&
17156 (rack->shape_rxt_to_pacing_min == 0) &&
17160 * Delay removing the flag RACK_MUST_RXT so
17161 * that the fastpath for retransmit will
17162 * work with this rsm.
17167 * Enforce a connection sendmap count limit, if set,
17168 * as long as we are not retransmitting.
17170 if ((rsm == NULL) &&
17171 (rack->do_detection == 0) &&
17172 (V_tcp_map_entries_limit > 0) &&
17173 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
17174 counter_u64_add(rack_to_alloc_limited, 1);
17175 if (!rack->alloc_limit_reported) {
17176 rack->alloc_limit_reported = 1;
17177 counter_u64_add(rack_alloc_limited_conns, 1);
17179 so = inp->inp_socket;
17181 goto just_return_nolock;
17183 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) {
17184 /* we are retransmitting the fin */
17188 * When retransmitting data, do *not* include the
17189 * FIN. This could happen from a TLP probe.
17194 if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo &&
17195 ((rsm->r_flags & RACK_HAS_FIN) == 0)) {
17198 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp);
17202 so = inp->inp_socket;
17204 if (do_a_prefetch == 0) {
17205 kern_prefetch(sb, &do_a_prefetch);
17208 #ifdef NETFLIX_SHARED_CWND
17209 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) &&
17210 rack->rack_enable_scwnd) {
17211 /* We are doing cwnd sharing */
17212 if (rack->gp_ready &&
17213 (rack->rack_attempted_scwnd == 0) &&
17214 (rack->r_ctl.rc_scw == NULL) &&
17216 /* The pcbid is in; let's make an attempt */
17217 counter_u64_add(rack_try_scwnd, 1);
17218 rack->rack_attempted_scwnd = 1;
17219 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp,
17220 &rack->r_ctl.rc_scw_index,
17223 if (rack->r_ctl.rc_scw &&
17224 (rack->rack_scwnd_is_idle == 1) &&
17225 sbavail(&so->so_snd)) {
17226 /* we are no longer out of data */
17227 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
17228 rack->rack_scwnd_is_idle = 0;
17230 if (rack->r_ctl.rc_scw) {
17231 /* First let's update and get the cwnd */
17232 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw,
17233 rack->r_ctl.rc_scw_index,
17234 tp->snd_cwnd, tp->snd_wnd, segsiz);
17239 * Get standard flags, and add SYN or FIN if requested by 'hidden'
17242 if (tp->t_flags & TF_NEEDFIN)
17244 if (tp->t_flags & TF_NEEDSYN)
17246 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) {
17248 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
17250 kern_prefetch(end_rsm, &prefetch_rsm);
17255 * If snd_nxt == snd_max and we have transmitted a FIN, the
17256 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a
17257 * negative length. This can also occur when TCP opens up its
17258 * congestion window while receiving additional duplicate acks after
17259 * fast-retransmit because TCP will reset snd_nxt to snd_max after
17260 * the fast-retransmit.
17262 * In the normal retransmit-FIN-only case, however, snd_nxt will be
17263 * set to snd_una, the sb_offset will be 0, and the length may wind
17266 * If sack_rxmit is true we are retransmitting from the scoreboard
17267 * in which case len is already set.
17269 if ((sack_rxmit == 0) &&
17270 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) {
17273 avail = sbavail(sb);
17274 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail)
17275 sb_offset = tp->snd_nxt - tp->snd_una;
17278 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) {
17279 if (rack->r_ctl.rc_tlp_new_data) {
17280 /* TLP is forcing out new data */
17281 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) {
17282 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset);
17284 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) {
17285 if (tp->snd_wnd > sb_offset)
17286 len = tp->snd_wnd - sb_offset;
17290 len = rack->r_ctl.rc_tlp_new_data;
17292 rack->r_ctl.rc_tlp_new_data = 0;
17294 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset);
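/* len now holds the new-data allowance: either what the TLP is forcing out or what rack_what_can_we_send() permits under cwnd/rwnd. */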
17296 if ((rack->r_ctl.crte == NULL) && IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) {
17298 * For prr=off, we need to send only 1 MSS
17299 * at a time. We do this because another sack could
17300 * be arriving that causes us to send retransmits and
17301 * we don't want to be on a long pace due to a larger send
17302 * that keeps us from sending out the retransmit.
17307 uint32_t outstanding;
17309 * We are inside of a fast recovery episode; this
17310 * is caused by a SACK or 3 dup acks. At this point
17311 * we have sent all the retransmissions and we rely
17312 * on PRR to dictate what we will send in the form of
17316 outstanding = tp->snd_max - tp->snd_una;
17317 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) {
17318 if (tp->snd_wnd > outstanding) {
17319 len = tp->snd_wnd - outstanding;
17320 /* Check to see if we have the data */
17321 if ((sb_offset + len) > avail) {
17322 /* It does not all fit */
17323 if (avail > sb_offset)
17324 len = avail - sb_offset;
17331 } else if (avail > sb_offset) {
17332 len = avail - sb_offset;
17337 if (len > rack->r_ctl.rc_prr_sndcnt) {
17338 len = rack->r_ctl.rc_prr_sndcnt;
17344 if (len > segsiz) {
17346 * We should never send more than one MSS when
17347 * retransmitting or sending new data in prr
17348 * mode unless the override flag is on. Most
17349 * likely the PRR algorithm is not going to
17350 * let us send a lot as well :-)
17352 if (rack->r_ctl.rc_prr_sendalot == 0) {
17355 } else if (len < segsiz) {
17357 * Do we send any? The idea here is that if the
17358 * send empties the socket buffer we want to
17359 * do it. However, if not, then let's just wait
17360 * for our prr_sndcnt to get bigger.
17364 leftinsb = sbavail(sb) - sb_offset;
17365 if (leftinsb > len) {
17366 /* This send does not empty the sb */
17371 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) {
17373 * If you have not established
17374 * and are not doing FAST OPEN
17377 if ((sack_rxmit == 0) &&
17378 (!IS_FASTOPEN(tp->t_flags))){
17383 if (prefetch_so_done == 0) {
17384 kern_prefetch(so, &prefetch_so_done);
17385 prefetch_so_done = 1;
17388 * Lop off SYN bit if it has already been sent. However, if this is
17389 * SYN-SENT state and if segment contains data and if we don't know
17390 * that foreign host supports TAO, suppress sending segment.
17392 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) &&
17393 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) {
17395 * When sending additional segments following a TFO SYN|ACK,
17396 * do not include the SYN bit.
17398 if (IS_FASTOPEN(tp->t_flags) &&
17399 (tp->t_state == TCPS_SYN_RECEIVED))
17403 * Be careful not to send data and/or FIN on SYN segments. This
17404 * measure is needed to prevent interoperability problems with not
17405 * fully conformant TCP implementations.
17407 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
17412 * On TFO sockets, ensure no data is sent in the following cases:
17414 * - When retransmitting SYN|ACK on a passively-created socket
17416 * - When retransmitting SYN on an actively created socket
17418 * - When sending a zero-length cookie (cookie request) on an
17419 * actively created socket
17421 * - When the socket is in the CLOSED state (RST is being sent)
17423 if (IS_FASTOPEN(tp->t_flags) &&
17424 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
17425 ((tp->t_state == TCPS_SYN_SENT) &&
17426 (tp->t_tfo_client_cookie_len == 0)) ||
17427 (flags & TH_RST))) {
17431 /* Without fast-open there should never be data sent on a SYN */
17432 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) {
17433 tp->snd_nxt = tp->iss;
17436 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) {
17437 /* We only send 1 MSS if we have a DSACK block */
17438 add_flag |= RACK_SENT_W_DSACK;
17444 * If FIN has been sent but not acked, but we haven't been
17445 * called to retransmit, len will be < 0. Otherwise, window
17446 * shrank after we sent into it. If window shrank to 0,
17447 * cancel pending retransmit, pull snd_nxt back to (closed)
17448 * window, and set the persist timer if it isn't already
17449 * going. If the window didn't close completely, just wait
17452 * We also do a general check here to ensure that we will
17453 * set the persist timer when we have data to send, but a
17454 * 0-byte window. This makes sure the persist timer is set
17455 * even if the packet hits one of the "goto send" lines
17459 if ((tp->snd_wnd == 0) &&
17460 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
17461 (tp->snd_una == tp->snd_max) &&
17462 (sb_offset < (int)sbavail(sb))) {
17463 rack_enter_persist(tp, rack, cts);
17465 } else if ((rsm == NULL) &&
17466 (doing_tlp == 0) &&
17467 (len < pace_max_seg)) {
17469 * We are not sending a maximum sized segment for
17470 * some reason. Should we not send anything (think
17471 * sws or persists)?
17473 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
17474 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
17476 (len < (int)(sbavail(sb) - sb_offset))) {
17478 * Here the rwnd is less than
17479 * the minimum pacing size, this is not a retransmit,
17480 * we are established, and
17481 * the send is not the last in the socket buffer;
17482 * we send nothing, and we may enter persists
17483 * if nothing is outstanding.
17486 if (tp->snd_max == tp->snd_una) {
17488 * Nothing is outstanding; we can
17489 * go into persists.
17491 rack_enter_persist(tp, rack, cts);
17493 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) &&
17494 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
17495 (len < (int)(sbavail(sb) - sb_offset)) &&
17498 * Here we are not retransmitting, and
17499 * the cwnd is not so small that we could
17500 * not send at least a min size (rxt timer
17501 * not having gone off), we have 2 segments or
17502 * more already in flight, it's not the tail end
17503 * of the socket buffer, and the cwnd is blocking
17504 * us from sending out a minimum pacing segment size.
17505 * Let's not send anything.
17508 } else if (((tp->snd_wnd - ctf_outstanding(tp)) <
17509 min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
17510 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
17511 (len < (int)(sbavail(sb) - sb_offset)) &&
17512 (TCPS_HAVEESTABLISHED(tp->t_state))) {
17514 * Here we have a send window but we have
17515 * filled it up and we can't send another pacing segment.
17516 * We also have in flight more than 2 segments
17517 * and we are not completing the sb, i.e., we allow
17518 * the last bytes of the sb to go out even if
17519 * it's not a full pacing segment.
17522 } else if ((rack->r_ctl.crte != NULL) &&
17523 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) &&
17524 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) &&
17525 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) &&
17526 (len < (int)(sbavail(sb) - sb_offset))) {
17528 * Here we are doing hardware pacing, this is not a TLP,
17529 * we are not sending a pace max segment size, there is rwnd
17530 * room to send at least N pace_max_seg, the cwnd is greater
17531 * than or equal to a full pacing segment plus 4 MSS and we have 2 or
17532 * more segments in flight and it's not the tail of the socket buffer.
17534 * We don't want to send instead we need to get more ack's in to
17535 * allow us to send a full pacing segment. Normally, if we are pacing
17536 * about the right speed, we should have finished our pacing
17537 * send as most of the acks have come back if we are at the
17538 * right rate. This is a bit fuzzy since return path delay
17539 * can delay the acks, which is why we want to make sure we
17540 * have cwnd space to have a bit more than a max pace segments in flight.
17542 * If we have not gotten our acks back, we are pacing at too high a
17543 * rate; delaying will not hurt and will bring our GP estimate down by
17544 * injecting the delay. If we don't do this we will send
17545 * 2 MSS out in response to the acks being clocked in which
17546 * defeats the point of hw-pacing (i.e. to help us get
17547 * larger TSO's out).
17554 /* len will be >= 0 after this point. */
17555 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
17556 rack_sndbuf_autoscale(rack);
17558 * Decide if we can use TCP Segmentation Offloading (if supported by
17561 * TSO may only be used if we are in a pure bulk sending state. The
17562 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
17563 * options prevent using TSO. With TSO the TCP header is the same
17564 * (except for the sequence number) for all generated packets. This
17565 * makes it impossible to transmit any options which vary per
17566 * generated segment or packet.
17568 * IPv4 handling has a clear separation of ip options and ip header
17569 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
17570 * the right thing below to provide length of just ip options and thus
17571 * checking for ipoptlen is enough to decide if ip options are present.
17574 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
17576 * Pre-calculate here as we save another lookup into the darknesses
17577 * of IPsec that way and can actually decide if TSO is ok.
17580 if (isipv6 && IPSEC_ENABLED(ipv6))
17581 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb);
17587 if (IPSEC_ENABLED(ipv4))
17588 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb);
17592 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
17593 ipoptlen += ipsec_optlen;
17595 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz &&
17596 (tp->t_port == 0) &&
17597 ((tp->t_flags & TF_SIGNATURE) == 0) &&
17598 tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
17602 uint32_t outstanding __unused;
17604 outstanding = tp->snd_max - tp->snd_una;
17605 if (tp->t_flags & TF_SENTFIN) {
17607 * If we sent a fin, snd_max is 1 higher than
17613 if ((rsm->r_flags & RACK_HAS_FIN) == 0)
17616 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
17621 recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
17622 (long)TCP_MAXWIN << tp->rcv_scale);
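/* Start the receive window at the free space in the receive buffer, clamped to the largest window our scale factor allows us to advertise. */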
17625 * Sender silly window avoidance. We transmit under the following
17626 * conditions when len is non-zero:
17628 * - We have a full segment (or more with TSO) - This is the last
17629 * buffer in a write()/send() and we are either idle or running
17630 * NODELAY - we've timed out (e.g. persist timer) - we have more
17631 * than 1/2 the maximum send window's worth of data (receiver may be
17632 * limiting the window size) - we need to retransmit
17635 if (len >= segsiz) {
17639 * NOTE! on localhost connections an 'ack' from the remote
17640 * end may occur synchronously with the output and cause us
17641 * to flush a buffer queued with moretocome. XXX
17644 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */
17645 (idle || (tp->t_flags & TF_NODELAY)) &&
17646 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
17647 (tp->t_flags & TF_NOPUSH) == 0) {
17651 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */
17655 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
17659 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */
17667 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) &&
17668 (ctf_outstanding(tp) < (segsiz * 2))) {
17670 * We have less than two MSS outstanding (delayed ack)
17671 * and our rwnd will not let us send a full-sized
17672 * MSS. Let's go ahead and let this small segment
17673 * out because we want to try to have at least two
17674 * packets in flight to not be caught by delayed ack.
17681 * Sending of standalone window updates.
17683 * Window updates are important when we close our window due to a
17684 * full socket buffer and are opening it again after the application
17685 * reads data from it. Once the window has opened again and the
17686 * remote end starts to send again the ACK clock takes over and
17687 * provides the most current window information.
17689 * We must avoid the silly window syndrome whereby every read from
17690 * the receive buffer, no matter how small, causes a window update
17691 * to be sent. We also should avoid sending a flurry of window
17692 * updates when the socket buffer had queued a lot of data and the
17693 * application is doing small reads.
17695 * Prevent a flurry of pointless window updates by only sending an
17696 * update when we can increase the advertised window by more than
17697 * 1/4th of the socket buffer capacity. When the buffer is getting
17698 * full or is very small be more aggressive and send an update
17699 * whenever we can increase by two mss sized segments. In all other
17700 * situations the ACK's to new incoming data will carry further
17701 * window increases.
17703 * Don't send an independent window update if a delayed ACK is
17704 * pending (it will get piggy-backed on it) or the remote side
17705 * already has done a half-close and won't send more data. Skip
17706 * this if the connection is in T/TCP half-open state.
17708 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
17709 !(tp->t_flags & TF_DELACK) &&
17710 !TCPS_HAVERCVDFIN(tp->t_state)) {
17712 * "adv" is the amount we could increase the window, taking
17713 * into account that we are limited by TCP_MAXWIN <<
17720 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
17721 oldwin = (tp->rcv_adv - tp->rcv_nxt);
17725 /* We can't increase the window */
17732 * If the new window size ends up being the same as or less
17733 * than the old size when it is scaled, then don't force
17736 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
17739 if (adv >= (int32_t)(2 * segsiz) &&
17740 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
17741 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) ||
17742 so->so_rcv.sb_hiwat <= 8 * segsiz)) {
17746 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) {
17754 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW
17755 * is also a catch-all for the retransmit timer timeout case.
17757 if (tp->t_flags & TF_ACKNOW) {
17761 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) {
17766 * If our state indicates that FIN should be sent and we have not
17767 * yet done so, then we need to send.
17769 if ((flags & TH_FIN) &&
17770 (tp->snd_nxt == tp->snd_una)) {
17775 * No reason to send a segment, just return.
17778 SOCKBUF_UNLOCK(sb);
17779 just_return_nolock:
17781 int app_limited = CTF_JR_SENT_DATA;
17783 if (tot_len_this_send > 0) {
17784 /* Make sure snd_nxt is up to max */
17785 rack->r_ctl.fsb.recwin = recwin;
17786 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz);
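/* slot is the pacing delay before the hpts should wake us again; it is handed to rack_start_hpts_timer() below. */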
17787 if ((error == 0) &&
17789 ((flags & (TH_SYN|TH_FIN)) == 0) &&
17791 (tp->snd_nxt == tp->snd_max) &&
17792 (tp->rcv_numsacks == 0) &&
17793 rack->r_fsb_inited &&
17794 TCPS_HAVEESTABLISHED(tp->t_state) &&
17795 (rack->r_must_retran == 0) &&
17796 ((tp->t_flags & TF_NEEDFIN) == 0) &&
17797 (len > 0) && (orig_len > 0) &&
17798 (orig_len > len) &&
17799 ((orig_len - len) >= segsiz) &&
17801 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
17802 /* We can send at least one more MSS using our fsb */
17804 rack->r_fast_output = 1;
17805 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
17806 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
17807 rack->r_ctl.fsb.tcp_flags = flags;
17808 rack->r_ctl.fsb.left_to_send = orig_len - len;
17810 rack->r_ctl.fsb.hw_tls = 1;
17812 rack->r_ctl.fsb.hw_tls = 0;
17813 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
17814 ("rack:%p left_to_send:%u sbavail:%u out:%u",
17815 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
17816 (tp->snd_max - tp->snd_una)));
17817 if (rack->r_ctl.fsb.left_to_send < segsiz)
17818 rack->r_fast_output = 0;
17820 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
17821 rack->r_ctl.fsb.rfo_apply_push = 1;
17823 rack->r_ctl.fsb.rfo_apply_push = 0;
17826 rack->r_fast_output = 0;
17829 rack_log_fsb(rack, tp, so, flags,
17830 ipoptlen, orig_len, len, 0,
17831 1, optlen, __LINE__, 1);
17832 if (SEQ_GT(tp->snd_max, tp->snd_nxt))
17833 tp->snd_nxt = tp->snd_max;
17835 int end_window = 0;
17836 uint32_t seq = tp->gput_ack;
17838 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
17841 * Mark the last sent that we just-returned (hinting
17842 * that delayed ack may play a role in any rtt measurement).
17844 rsm->r_just_ret = 1;
17846 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1);
17847 rack->r_ctl.rc_agg_delayed = 0;
17850 rack->r_ctl.rc_agg_early = 0;
17851 if ((ctf_outstanding(tp) +
17852 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)),
17853 minseg)) >= tp->snd_wnd) {
17854 /* We are limited by the rwnd */
17855 app_limited = CTF_JR_RWND_LIMITED;
17856 if (IN_FASTRECOVERY(tp->t_flags))
17857 rack->r_ctl.rc_prr_sndcnt = 0;
17858 } else if (ctf_outstanding(tp) >= sbavail(sb)) {
17859 /* We are limited by what's available -- app limited */
17860 app_limited = CTF_JR_APP_LIMITED;
17861 if (IN_FASTRECOVERY(tp->t_flags))
17862 rack->r_ctl.rc_prr_sndcnt = 0;
17863 } else if ((idle == 0) &&
17864 ((tp->t_flags & TF_NODELAY) == 0) &&
17865 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
17868 * Nodelay is not on and the
17869 * user is sending less than 1 MSS. This
17870 * brings out SWS avoidance, so we
17871 * don't send. Another app-limited case.
17873 app_limited = CTF_JR_APP_LIMITED;
17874 } else if (tp->t_flags & TF_NOPUSH) {
17876 * The user has requested no push of
17877 * the last segment and we are
17878 * at the last segment. Another app
17881 app_limited = CTF_JR_APP_LIMITED;
17882 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) {
17884 app_limited = CTF_JR_CWND_LIMITED;
17885 } else if (IN_FASTRECOVERY(tp->t_flags) &&
17886 (rack->rack_no_prr == 0) &&
17887 (rack->r_ctl.rc_prr_sndcnt < segsiz)) {
17888 app_limited = CTF_JR_PRR;
17890 /* Now, why are we not sending here? */
17893 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
17896 app_limited = CTF_JR_ASSESSING;
17899 * App limited in some fashion; for our pacing GP
17900 * measurements we don't want any gap (even cwnd).
17901 * Close down the measurement window.
17903 if (rack_cwnd_block_ends_measure &&
17904 ((app_limited == CTF_JR_CWND_LIMITED) ||
17905 (app_limited == CTF_JR_PRR))) {
17907 * The reason we are not sending is
17908 * the cwnd (or prr). We have been configured
17909 * to end the measurement window in
17913 } else if (rack_rwnd_block_ends_measure &&
17914 (app_limited == CTF_JR_RWND_LIMITED)) {
17916 * We are rwnd limited and have been
17917 * configured to end the measurement
17918 * window in this case.
17921 } else if (app_limited == CTF_JR_APP_LIMITED) {
17923 * A true application limited period, we have
17927 } else if (app_limited == CTF_JR_ASSESSING) {
17929 * In the assessing case we hit the end of
17930 * the if/else and had no known reason.
17931 * This will panic us under invariants.
17933 * If we get this out in logs we need to
17934 * investigate which reason we missed.
17941 /* Adjust the Gput measurement */
17942 if ((tp->t_flags & TF_GPUTINPROG) &&
17943 SEQ_GT(tp->gput_ack, tp->snd_max)) {
17944 tp->gput_ack = tp->snd_max;
17945 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
17947 * There is not enough to measure.
17949 tp->t_flags &= ~TF_GPUTINPROG;
17950 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
17951 rack->r_ctl.rc_gp_srtt /*flex1*/,
17953 0, 0, 18, __LINE__, NULL, 0);
17957 /* Mark the last packet as app limited */
17958 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
17959 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
17960 if (rack->r_ctl.rc_app_limited_cnt == 0)
17961 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
17964 * Go out to the end app limited and mark
17965 * this new one as next and move the end_appl up
17968 if (rack->r_ctl.rc_end_appl)
17969 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
17970 rack->r_ctl.rc_end_appl = rsm;
17972 rsm->r_flags |= RACK_APP_LIMITED;
17973 rack->r_ctl.rc_app_limited_cnt++;
17976 rack_log_pacing_delay_calc(rack,
17977 rack->r_ctl.rc_app_limited_cnt, seq,
17978 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0);
17981 /* Check if we need to go into persists or not */
17982 if ((tp->snd_max == tp->snd_una) &&
17983 TCPS_HAVEESTABLISHED(tp->t_state) &&
17985 (sbavail(sb) > tp->snd_wnd) &&
17986 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) {
17987 /* Yes, let's make sure to move to persist before timer-start */
17988 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
17990 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack);
17991 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use);
17993 #ifdef NETFLIX_SHARED_CWND
17994 if ((sbavail(sb) == 0) &&
17995 rack->r_ctl.rc_scw) {
17996 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
17997 rack->rack_scwnd_is_idle = 1;
18000 #ifdef TCP_ACCOUNTING
18001 if (tot_len_this_send > 0) {
18002 crtsc = get_cyclecount();
18003 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18004 tp->tcp_cnt_counters[SND_OUT_DATA]++;
18006 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1);
18007 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18008 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
18010 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val));
18011 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18012 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz);
18014 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) / segsiz));
18016 crtsc = get_cyclecount();
18017 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18018 tp->tcp_cnt_counters[SND_LIMITED]++;
18020 counter_u64_add(tcp_cnt_counters[SND_LIMITED], 1);
18021 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18022 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val);
18024 counter_u64_add(tcp_proc_time[SND_LIMITED], (crtsc - ts_val));
18031 if (rsm || sack_rxmit)
18032 counter_u64_add(rack_nfto_resend, 1);
18034 counter_u64_add(rack_non_fto_send, 1);
18035 if ((flags & TH_FIN) &&
18038 * We do not transmit a FIN
18039 * with data outstanding. We
18040 * need to make it so all data
18045 /* Enforce stack imposed max seg size if we have one */
18046 if (rack->r_ctl.rc_pace_max_segs &&
18047 (len > rack->r_ctl.rc_pace_max_segs)) {
18049 len = rack->r_ctl.rc_pace_max_segs;
18051 SOCKBUF_LOCK_ASSERT(sb);
18054 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
18056 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
18059 * Before ESTABLISHED, force sending of initial options unless TCP
18060 * set not to do any options. NOTE: we assume that the IP/TCP header
18061 * plus TCP options always fit in a single mbuf, leaving room for a
18062 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr)
18063 * + optlen <= MCLBYTES
18068 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
18071 hdrlen = sizeof(struct tcpiphdr);
18074 * Compute options for segment. We only have to care about SYN and
18075 * established connection segments. Options for SYN-ACK segments
18076 * are handled in TCP syncache.
18079 if ((tp->t_flags & TF_NOOPT) == 0) {
18080 /* Maximum segment size. */
18081 if (flags & TH_SYN) {
18082 tp->snd_nxt = tp->iss;
18083 to.to_mss = tcp_mssopt(&inp->inp_inc);
18085 to.to_mss -= V_tcp_udp_tunneling_overhead;
18086 to.to_flags |= TOF_MSS;
18089 * On SYN or SYN|ACK transmits on TFO connections,
18090 * only include the TFO option if it is not a
18091 * retransmit, as the presence of the TFO option may
18092 * have caused the original SYN or SYN|ACK to have
18093 * been dropped by a middlebox.
18095 if (IS_FASTOPEN(tp->t_flags) &&
18096 (tp->t_rxtshift == 0)) {
18097 if (tp->t_state == TCPS_SYN_RECEIVED) {
18098 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
18100 (u_int8_t *)&tp->t_tfo_cookie.server;
18101 to.to_flags |= TOF_FASTOPEN;
18103 } else if (tp->t_state == TCPS_SYN_SENT) {
18105 tp->t_tfo_client_cookie_len;
18107 tp->t_tfo_cookie.client;
18108 to.to_flags |= TOF_FASTOPEN;
18111 * If we wind up having more data to
18112 * send with the SYN than can fit in
18113 * one segment, don't send any more
18114 * until the SYN|ACK comes back from
18121 /* Window scaling. */
18122 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
18123 to.to_wscale = tp->request_r_scale;
18124 to.to_flags |= TOF_SCALE;
18127 if ((tp->t_flags & TF_RCVD_TSTMP) ||
18128 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
18129 to.to_tsval = ms_cts + tp->ts_offset;
18130 to.to_tsecr = tp->ts_recent;
18131 to.to_flags |= TOF_TS;
18133 /* Set receive buffer autosizing timestamp. */
18134 if (tp->rfbuf_ts == 0 &&
18135 (so->so_rcv.sb_flags & SB_AUTOSIZE))
18136 tp->rfbuf_ts = tcp_ts_getticks();
18137 /* Selective ACK's. */
18138 if (tp->t_flags & TF_SACK_PERMIT) {
18139 if (flags & TH_SYN)
18140 to.to_flags |= TOF_SACKPERM;
18141 else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
18142 tp->rcv_numsacks > 0) {
18143 to.to_flags |= TOF_SACK;
18144 to.to_nsacks = tp->rcv_numsacks;
18145 to.to_sacks = (u_char *)tp->sackblks;
18148 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
18149 /* TCP-MD5 (RFC2385). */
18150 if (tp->t_flags & TF_SIGNATURE)
18151 to.to_flags |= TOF_SIGNATURE;
18152 #endif /* TCP_SIGNATURE */
18154 /* Processing the options. */
18155 hdrlen += optlen = tcp_addoptions(&to, opt);
18157 * If we wanted a TFO option to be added, but it was unable
18158 * to fit, ensure no data is sent.
18160 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie &&
18161 !(to.to_flags & TOF_FASTOPEN))
18165 if (V_tcp_udp_tunneling_port == 0) {
18166 /* The port was removed?? */
18167 SOCKBUF_UNLOCK(&so->so_snd);
18168 #ifdef TCP_ACCOUNTING
18169 crtsc = get_cyclecount();
18170 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18171 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
18173 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1);
18174 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18175 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
18177 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val));
18180 return (EHOSTUNREACH);
18182 hdrlen += sizeof(struct udphdr);
18186 ipoptlen = ip6_optlen(tp->t_inpcb);
18189 if (tp->t_inpcb->inp_options)
18190 ipoptlen = tp->t_inpcb->inp_options->m_len -
18191 offsetof(struct ipoption, ipopt_list);
18194 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
18195 ipoptlen += ipsec_optlen;
18199 * Adjust data length if insertion of options will bump the packet
18200 * length beyond the t_maxseg length. Clear the FIN bit because we
18201 * cut off the tail of the segment.
18203 if (len + optlen + ipoptlen > tp->t_maxseg) {
18205 uint32_t if_hw_tsomax;
18209 /* extract TSO information */
18210 if_hw_tsomax = tp->t_tsomax;
18211 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
18212 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
18213 KASSERT(ipoptlen == 0,
18214 ("%s: TSO can't do IP options", __func__));
18217 * Check if we should limit by maximum payload
18220 if (if_hw_tsomax != 0) {
18221 /* compute maximum TSO length */
18222 max_len = (if_hw_tsomax - hdrlen -
18224 if (max_len <= 0) {
18226 } else if (len > max_len) {
18233 * Prevent the last segment from being fractional
18234 * unless the send sockbuf can be emptied:
18236 max_len = (tp->t_maxseg - optlen);
18237 if ((sb_offset + len) < sbavail(sb)) {
18238 moff = len % (u_int)max_len;
18245 * In case there are too many small fragments don't
18248 if (len <= segsiz) {
18253 * Send the FIN in a separate segment after the bulk
18254 * sending is done. We don't trust the TSO
18255 * implementations to clear the FIN flag on all but
18256 * the last segment.
18258 if (tp->t_flags & TF_NEEDFIN) {
18263 if (optlen + ipoptlen >= tp->t_maxseg) {
18265 * Since we don't have enough space to put
18266 * the IP header chain and the TCP header in
18267 * one packet as required by RFC 7112, don't
18268 * send it. Also ensure that at least one
18269 * byte of the payload can be put into the
18272 SOCKBUF_UNLOCK(&so->so_snd);
18277 len = tp->t_maxseg - optlen - ipoptlen;
18284 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
18285 ("%s: len > IP_MAXPACKET", __func__));
18288 if (max_linkhdr + hdrlen > MCLBYTES)
18290 if (max_linkhdr + hdrlen > MHLEN)
18292 panic("tcphdr too big");
18296 * This KASSERT is here to catch edge cases at a well defined place.
18297 * Before, those had triggered (random) panic conditions further
18300 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
18302 (flags & TH_FIN) &&
18305 * We have outstanding data; don't send a FIN by itself.
18310 * Grab a header mbuf, attaching a copy of data to be transmitted,
18311 * and initialize the header from the template for sends on this
18314 hw_tls = (sb->sb_flags & SB_TLS_IFNET) != 0;
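/* Note whether the send buffer holds ifnet (hardware) kTLS data; this is passed to tcp_m_copym() when new data is copied below. */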
18319 if (rack->r_ctl.rc_pace_max_segs)
18320 max_val = rack->r_ctl.rc_pace_max_segs;
18321 else if (rack->rc_user_set_max_segs)
18322 max_val = rack->rc_user_set_max_segs * segsiz;
18326 * We allow a limit on sending with hptsi.
18328 if (len > max_val) {
18333 if (MHLEN < hdrlen + max_linkhdr)
18334 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
18337 m = m_gethdr(M_NOWAIT, MT_DATA);
18340 SOCKBUF_UNLOCK(sb);
18345 m->m_data += max_linkhdr;
18349 * Start the m_copy functions from the closest mbuf to the
18350 * sb_offset in the socket buffer chain.
18352 mb = sbsndptr_noadv(sb, sb_offset, &moff);
18355 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) {
18356 m_copydata(mb, moff, (int)len,
18357 mtod(m, caddr_t)+hdrlen);
18358 if (SEQ_LT(tp->snd_nxt, tp->snd_max))
18359 sbsndptr_adv(sb, mb, len);
18362 struct sockbuf *msb;
18364 if (SEQ_LT(tp->snd_nxt, tp->snd_max))
18368 m->m_next = tcp_m_copym(
18370 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb,
18371 ((rsm == NULL) ? hw_tls : 0)
18372 #ifdef NETFLIX_COPY_ARGS
18376 if (len <= (tp->t_maxseg - optlen)) {
18378 * Must have run out of mbufs for the copy;
18379 * shorten it to no longer need TSO. Let's
18380 * not put on sendalot since we are low on
18385 if (m->m_next == NULL) {
18386 SOCKBUF_UNLOCK(sb);
18393 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
18394 if (rsm && (rsm->r_flags & RACK_TLP)) {
18396 * TLP should not count in retran count, but
18399 counter_u64_add(rack_tlp_retran, 1);
18400 counter_u64_add(rack_tlp_retran_bytes, len);
18402 tp->t_sndrexmitpack++;
18403 KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
18404 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
18407 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
18411 KMOD_TCPSTAT_INC(tcps_sndpack);
18412 KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
18414 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
18419 * If we're sending everything we've got, set PUSH. (This
18420 * will keep happy those implementations which only give
18421 * data to the user when a buffer fills or a PUSH comes in.)
18423 if (sb_offset + len == sbused(sb) &&
18425 !(flags & TH_SYN)) {
18427 add_flag |= RACK_HAD_PUSH;
18430 SOCKBUF_UNLOCK(sb);
18432 SOCKBUF_UNLOCK(sb);
18433 if (tp->t_flags & TF_ACKNOW)
18434 KMOD_TCPSTAT_INC(tcps_sndacks);
18435 else if (flags & (TH_SYN | TH_FIN | TH_RST))
18436 KMOD_TCPSTAT_INC(tcps_sndctrl);
18438 KMOD_TCPSTAT_INC(tcps_sndwinup);
18440 m = m_gethdr(M_NOWAIT, MT_DATA);
18447 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
18449 M_ALIGN(m, hdrlen);
18452 m->m_data += max_linkhdr;
18455 SOCKBUF_UNLOCK_ASSERT(sb);
18456 m->m_pkthdr.rcvif = (struct ifnet *)0;
18458 mac_inpcb_create_mbuf(inp, m);
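/*
 * Header setup: when there are no IP options and the fast-send block
 * has been initialized, point at its pre-built IP/UDP/TCP template;
 * otherwise build the headers (including the optional UDP tunneling
 * header when a port is set) directly in the mbuf.
 */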
18460 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
18463 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
18466 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
18467 th = rack->r_ctl.fsb.th;
18468 udp = rack->r_ctl.fsb.udp;
18472 ulen = hdrlen + len - sizeof(struct ip6_hdr);
18475 ulen = hdrlen + len - sizeof(struct ip);
18476 udp->uh_ulen = htons(ulen);
18481 ip6 = mtod(m, struct ip6_hdr *);
18483 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
18484 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
18485 udp->uh_dport = tp->t_port;
18486 ulen = hdrlen + len - sizeof(struct ip6_hdr);
18487 udp->uh_ulen = htons(ulen);
18488 th = (struct tcphdr *)(udp + 1);
18490 th = (struct tcphdr *)(ip6 + 1);
18491 tcpip_fillheaders(inp, tp->t_port, ip6, th);
18495 ip = mtod(m, struct ip *);
18497 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
18498 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
18499 udp->uh_dport = tp->t_port;
18500 ulen = hdrlen + len - sizeof(struct ip);
18501 udp->uh_ulen = htons(ulen);
18502 th = (struct tcphdr *)(udp + 1);
18504 th = (struct tcphdr *)(ip + 1);
18505 tcpip_fillheaders(inp, tp->t_port, ip, th);
18509 * Fill in fields, remembering maximum advertised window for use in
18510 * delaying messages about window sizes. If resending a FIN, be sure
18511 * not to use a new sequence number.
18513 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
18514 tp->snd_nxt == tp->snd_max)
18517 * If we are starting a connection, send ECN setup SYN packet. If we
18518 * are on a retransmit, we may resend those bits a number of times
18521 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) {
18522 flags |= tcp_ecn_output_syn_sent(tp);
18524 /* Also handle parallel SYN for ECN */
18525 if (TCPS_HAVERCVDSYN(tp->t_state) &&
18526 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
18527 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit);
18528 if ((tp->t_state == TCPS_SYN_RECEIVED) &&
18529 (tp->t_flags2 & TF2_ECN_SND_ECE))
18530 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
18533 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
18534 ip6->ip6_flow |= htonl(ect << 20);
18539 ip->ip_tos &= ~IPTOS_ECN_MASK;
18544 * If we are doing retransmissions, then snd_nxt will not reflect
18545 * the first unsent octet. For ACK only packets, we do not want the
18546 * sequence number of the retransmitted packet, we want the sequence
18547 * number of the next unsent octet. So, if there is no data (and no
18548 * SYN or FIN), use snd_max instead of snd_nxt when filling in
18549 * ti_seq. But if we are in persist state, snd_max might reflect
18550 * one byte beyond the right edge of the window, so use snd_nxt in
18551 * that case, since we know we aren't doing a retransmission.
18552 * (retransmit and persist are mutually exclusive...)
18554 if (sack_rxmit == 0) {
18555 if (len || (flags & (TH_SYN | TH_FIN))) {
18556 th->th_seq = htonl(tp->snd_nxt);
18557 rack_seq = tp->snd_nxt;
18559 th->th_seq = htonl(tp->snd_max);
18560 rack_seq = tp->snd_max;
18563 th->th_seq = htonl(rsm->r_start);
18564 rack_seq = rsm->r_start;
18566 th->th_ack = htonl(tp->rcv_nxt);
18567 tcp_set_flags(th, flags);
18569 * Calculate receive window. Don't shrink window, but avoid silly
18571 * If a RST segment is sent, advertise a window of zero.
18573 if (flags & TH_RST) {
18576 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
18577 recwin < (long)segsiz) {
18580 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
18581 recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
18582 recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
18586 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or
18587 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is
18588 * handled in syncache.
18590 if (flags & TH_SYN)
18591 th->th_win = htons((u_short)
18592 (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
18594 /* Avoid shrinking window with window scaling. */
18595 recwin = roundup2(recwin, 1 << tp->rcv_scale);
18596 th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
18599 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0
18600 * window. This may cause the remote transmitter to stall. This
18601 * flag tells soreceive() to disable delayed acknowledgements when
18602 * draining the buffer. This can occur if the receiver is
18603 * attempting to read more data than can be buffered prior to
18604 * transmitting on the connection.
18606 if (th->th_win == 0) {
18607 tp->t_sndzerowin++;
18608 tp->t_flags |= TF_RXWIN0SENT;
18610 tp->t_flags &= ~TF_RXWIN0SENT;
18611 tp->snd_up = tp->snd_una; /* drag it along, it's deprecated */
18612 /* Now, are we using the fsb? If so, copy the template data to the mbuf */
18613 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
18616 cpto = mtod(m, uint8_t *);
18617 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
18619 * We have just copied in:
18621 * <optional udphdr>
18622 * tcphdr (no options)
18624 * We need to grab the correct pointers into the mbuf
18625 * for both the tcp header, and possibly the udp header (if tunneling).
18626 * We do this by using the offset in the copy buffer and adding it
18627 * to the mbuf base pointer (cpto).
18631 ip6 = mtod(m, struct ip6_hdr *);
18634 ip = mtod(m, struct ip *);
18635 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
18636 /* If we have a udp header, let's set it into the mbuf as well */
18638 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr));
18640 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
18641 if (to.to_flags & TOF_SIGNATURE) {
18643 * Calculate MD5 signature and put it into the place
18644 * determined before.
18645 * NOTE: since TCP options buffer doesn't point into
18646 * mbuf's data, calculate offset and use it.
18648 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
18649 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
18651 * Do not send segment if the calculation of MD5
18652 * digest has failed.
18659 bcopy(opt, th + 1, optlen);
18660 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
18663 * Put TCP length in extended header, and then checksum extended
18666 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
18670 * ip6_plen does not need to be filled now, and will be filled
18674 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
18675 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
18676 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
18677 th->th_sum = htons(0);
18678 UDPSTAT_INC(udps_opackets);
18680 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
18681 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
18682 th->th_sum = in6_cksum_pseudo(ip6,
18683 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
18688 #if defined(INET6) && defined(INET)
18694 m->m_pkthdr.csum_flags = CSUM_UDP;
18695 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
18696 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
18697 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
18698 th->th_sum = htons(0);
18699 UDPSTAT_INC(udps_opackets);
18701 m->m_pkthdr.csum_flags = CSUM_TCP;
18702 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
18703 th->th_sum = in_pseudo(ip->ip_src.s_addr,
18704 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
18705 IPPROTO_TCP + len + optlen));
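/* Only the pseudo-header sum is filled in here; the remaining TCP/UDP checksum is completed later, in hardware or software, based on the CSUM_* flags set above. */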
18707 /* IP version must be set here for ipv4/ipv6 checking later */
18708 KASSERT(ip->ip_v == IPVERSION,
18709 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
18713 * Enable TSO and specify the size of the segments. The TCP pseudo
18714 * header checksum is always provided. XXX: Fixme: This is currently
18715 * not the case for IPv6.
18718 KASSERT(len > tp->t_maxseg - optlen,
18719 ("%s: len <= tso_segsz", __func__));
18720 m->m_pkthdr.csum_flags |= CSUM_TSO;
18721 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
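/* With TSO the segmentation happens after us; tso_segsz is the payload carried by each resulting segment (MSS less the TCP options). */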
18723 KASSERT(len + hdrlen == m_length(m, NULL),
18724 ("%s: mbuf chain different than expected: %d + %u != %u",
18725 __func__, len, hdrlen, m_length(m, NULL)));
18728 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
18729 hhook_run_tcp_est_out(tp, th, &to, len, tso);
18731 /* We're getting ready to send; log now. */
18732 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
18733 union tcp_log_stackspecific log;
18735 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
18736 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
18737 if (rack->rack_no_prr)
18738 log.u_bbr.flex1 = 0;
18740 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
18741 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
18742 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
18743 log.u_bbr.flex4 = orig_len;
18744 /* Save off the early/late values */
18745 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
18746 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
18747 log.u_bbr.bw_inuse = rack_get_bw(rack);
18748 log.u_bbr.flex8 = 0;
18750 if (rsm->r_flags & RACK_RWND_COLLAPSED) {
18751 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm);
18752 counter_u64_add(rack_collapsed_win_rxt, 1);
18753 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start));
18756 log.u_bbr.flex8 = 2;
18758 log.u_bbr.flex8 = 1;
18761 log.u_bbr.flex8 = 3;
18763 log.u_bbr.flex8 = 0;
18765 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
18766 log.u_bbr.flex7 = mark;
18767 log.u_bbr.flex7 <<= 8;
18768 log.u_bbr.flex7 |= pass;
18769 log.u_bbr.pkts_out = tp->t_maxseg;
18770 log.u_bbr.timeStamp = cts;
18771 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18772 log.u_bbr.lt_epoch = cwnd_to_use;
18773 log.u_bbr.delivered = sendalot;
18774 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
18775 len, &log, false, NULL, NULL, 0, &tv);
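/* Keep lgb so the errno from ip_output()/ip6_output() can be folded into this log record once the send completes. */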
18780 * Fill in IP length and desired time to live and send to IP level.
18781 * There should be a better way to handle ttl and tos; we could keep
18782 * them in the template, but need a way to checksum without them.
18785 * m->m_pkthdr.len should have been set before cksum calculation,
18786 * because in6_cksum() needs it.
18791 * we separately set hoplimit for every segment, since the
18792 * user might want to change the value via setsockopt. Also,
18793 * desired default hop limit might be changed via Neighbor
18796 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL);
18799 * Set the packet size here for the benefit of DTrace
18800 * probes. ip6_output() will set it properly; it's supposed
18801 * to include the option header lengths as well.
18803 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
18805 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
18806 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
18808 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
18810 if (tp->t_state == TCPS_SYN_SENT)
18811 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);
18813 TCP_PROBE5(send, NULL, tp, ip6, tp, th);
18814 /* TODO: IPv6 IP6TOS_ECT bit on */
18815 error = ip6_output(m,
18816 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
18817 inp->in6p_outputopts,
18822 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0),
18825 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
18826 mtu = inp->inp_route6.ro_nh->nh_mtu;
18829 #if defined(INET) && defined(INET6)
18834 ip->ip_len = htons(m->m_pkthdr.len);
18836 if (inp->inp_vflag & INP_IPV6PROTO)
18837 ip->ip_ttl = in6_selecthlim(inp, NULL);
18839 rack->r_ctl.fsb.hoplimit = ip->ip_ttl;
18841 * If we do path MTU discovery, then we set DF on every
18842 * packet. This might not be the best thing to do according
18843	 * to RFC3390 Section 2. However the tcp hostcache mitigates
18844	 * the problem so it affects only the first tcp connection with a host.
18847	 * NB: Don't set DF on small MTU/MSS to have a safe fallback.
18850 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
18851 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
18852 if (tp->t_port == 0 || len < V_tcp_minmss) {
18853 ip->ip_off |= htons(IP_DF);
18856 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
18859 if (tp->t_state == TCPS_SYN_SENT)
18860 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);
18862 TCP_PROBE5(send, NULL, tp, ip, tp, th);
18864 error = ip_output(m,
18865 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
18871 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0,
18873 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
18874 mtu = inp->inp_route.ro_nh->nh_mtu;
18880 lgb->tlb_errno = error;
18884 * In transmit state, time the transmission and arrange for the
18885 * retransmit. In persist state, just set snd_max.
18888 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls);
18889 if (rsm && doing_tlp) {
18890 rack->rc_last_sent_tlp_past_cumack = 0;
18891 rack->rc_last_sent_tlp_seq_valid = 1;
18892 rack->r_ctl.last_sent_tlp_seq = rsm->r_start;
18893 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start;
18895 rack->forced_ack = 0; /* If we send something zap the FA flag */
18896 if (rsm && (doing_tlp == 0)) {
18897 /* Set we retransmitted */
18898 rack->rc_gp_saw_rec = 1;
18900 if (cwnd_to_use > tp->snd_ssthresh) {
18901 /* Set we sent in CA */
18902 rack->rc_gp_saw_ca = 1;
18904 /* Set we sent in SS */
18905 rack->rc_gp_saw_ss = 1;
18908 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
18909 (tp->t_flags & TF_SACK_PERMIT) &&
18910 tp->rcv_numsacks > 0)
18911 tcp_clean_dsack_blocks(tp);
18912 tot_len_this_send += len;
18914 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1);
18915 else if (len == 1) {
18916 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1);
18917 } else if (len > 1) {
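		/* Bucket this send by segment count; anything past the last slot is clamped. */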
18920 idx = (len / segsiz) + 3;
18921 if (idx >= TCP_MSS_ACCT_ATIMER)
18922 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
18924 counter_u64_add(rack_out_size[idx], 1);
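	/* When PRR is in use, charge what we just sent against the PRR send allowance. */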
18927 if ((rack->rack_no_prr == 0) &&
18930 if (rack->r_ctl.rc_prr_sndcnt >= len)
18931 rack->r_ctl.rc_prr_sndcnt -= len;
18933 rack->r_ctl.rc_prr_sndcnt = 0;
18937 /* Make sure the TLP is added */
18938 add_flag |= RACK_TLP;
18940		/* If it's a resend without TLP then it must not have the flag */
18941 rsm->r_flags &= ~RACK_TLP;
18943 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error,
18944 rack_to_usec_ts(&tv),
18945 rsm, add_flag, s_mb, s_moff, hw_tls);
18948 if ((error == 0) &&
18950 (tp->snd_una == tp->snd_max))
18951 rack->r_ctl.rc_tlp_rxt_last_time = cts;
18953 tcp_seq startseq = tp->snd_nxt;
18955 /* Track our lost count */
18956 if (rsm && (doing_tlp == 0))
18957 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start;
18959 * Advance snd_nxt over sequence space of this segment.
18962 /* We don't log or do anything with errors */
18964 if (doing_tlp == 0) {
18967 * Not a retransmission of some
18968 * sort, new data is going out so
18969 * clear our TLP count and flag.
18971 rack->rc_tlp_in_progress = 0;
18972 rack->r_ctl.rc_tlp_cnt_out = 0;
18976 * We have just sent a TLP, mark that it is true
18977 * and make sure our in progress is set so we
18978 * continue to check the count.
18980 rack->rc_tlp_in_progress = 1;
18981 rack->r_ctl.rc_tlp_cnt_out++;
18983 if (flags & (TH_SYN | TH_FIN)) {
18984 if (flags & TH_SYN)
18986 if (flags & TH_FIN) {
18988 tp->t_flags |= TF_SENTFIN;
18991 /* In the ENOBUFS case we do *not* update snd_max */
18995 tp->snd_nxt += len;
18996 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
18997 if (tp->snd_una == tp->snd_max) {
18999 * Update the time we just added data since
19000 * none was outstanding.
19002 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
19003 tp->t_acktime = ticks;
19005 tp->snd_max = tp->snd_nxt;
19007 * Time this transmission if not a retransmission and
19008 * not currently timing anything.
19009 * This is only relevant in case of switching back to
19012 if (tp->t_rtttime == 0) {
19013 tp->t_rtttime = ticks;
19014 tp->t_rtseq = startseq;
19015 KMOD_TCPSTAT_INC(tcps_segstimed);
19018 ((tp->t_flags & TF_GPUTINPROG) == 0))
19019 rack_start_gp_measurement(tp, rack, startseq, sb_offset);
19022	 * If we are doing FO we need to update the mbuf position and subtract what we just sent.
19023	 * This happens when the peer sends us duplicate information and
19024 * we thus want to send a DSACK.
19026 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO
19027 * turned off? If not then we are going to echo multiple DSACK blocks
19028 * out (with the TSO), which we should not be doing.
19030 if (rack->r_fast_output && len) {
19031 if (rack->r_ctl.fsb.left_to_send > len)
19032 rack->r_ctl.fsb.left_to_send -= len;
19034 rack->r_ctl.fsb.left_to_send = 0;
19035 if (rack->r_ctl.fsb.left_to_send < segsiz)
19036 rack->r_fast_output = 0;
19037 if (rack->r_fast_output) {
19038 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
19039 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
19045 rack->r_ctl.rc_agg_delayed = 0;
19048 rack->r_ctl.rc_agg_early = 0;
19049 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */
19051 * Failures do not advance the seq counter above. For the
19052 * case of ENOBUFS we will fall out and retry in 1ms with
19053	 * the hpts. Everything else will just have to retransmit with the timer.
19056 * In any case, we do not want to loop around for another
19057 * send without a good reason.
19062 tp->t_softerror = error;
19063 #ifdef TCP_ACCOUNTING
19064 crtsc = get_cyclecount();
19065 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19066 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
19068 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1);
19069 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19070 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
19072 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val));
19078		 * Pace us right away to retry in some small period of time.
19081 if (rack->r_ctl.crte != NULL) {
19082 rack_trace_point(rack, RACK_TP_HWENOBUF);
19084 rack_trace_point(rack, RACK_TP_ENOBUF);
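		/* Grow the retry delay with each consecutive ENOBUF, but never retry sooner than 10ms. */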
19085 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
19086 if (rack->rc_enobuf < 0x7f)
19088 if (slot < (10 * HPTS_USEC_IN_MSEC))
19089 slot = 10 * HPTS_USEC_IN_MSEC;
19090 if (rack->r_ctl.crte != NULL) {
19091 counter_u64_add(rack_saw_enobuf_hw, 1);
19092 tcp_rl_log_enobuf(rack->r_ctl.crte);
19094 counter_u64_add(rack_saw_enobuf, 1);
19098 * For some reason the interface we used initially
19099 * to send segments changed to another or lowered
19100 * its MTU. If TSO was active we either got an
19101		 * interface without TSO capabilities or TSO was
19102 * turned off. If we obtained mtu from ip_output()
19103 * then update it and try again.
19106 tp->t_flags &= ~TF_TSO;
19108 tcp_mss_update(tp, -1, mtu, NULL, NULL);
19111 slot = 10 * HPTS_USEC_IN_MSEC;
19112 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
19113 #ifdef TCP_ACCOUNTING
19114 crtsc = get_cyclecount();
19115 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19116 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
19118 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1);
19119 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19120 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
19122 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val));
19127 counter_u64_add(rack_saw_enetunreach, 1);
19131 if (TCPS_HAVERCVDSYN(tp->t_state)) {
19132 tp->t_softerror = error;
19136 slot = 10 * HPTS_USEC_IN_MSEC;
19137 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
19138 #ifdef TCP_ACCOUNTING
19139 crtsc = get_cyclecount();
19140 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19141 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
19143 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1);
19144 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19145 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
19147 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val));
19153 rack->rc_enobuf = 0;
19154 if (IN_FASTRECOVERY(tp->t_flags) && rsm)
19155 rack->r_ctl.retran_during_recovery += len;
19157 KMOD_TCPSTAT_INC(tcps_sndtotal);
19160 * Data sent (as far as we can tell). If this advertises a larger
19161 * window than any other segment, then remember the size of the
19162 * advertised window. Any pending ACK has now been sent.
19164 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
19165 tp->rcv_adv = tp->rcv_nxt + recwin;
19167 tp->last_ack_sent = tp->rcv_nxt;
19168 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
19171 /* Do we need to turn off sendalot? */
19172 if (rack->r_ctl.rc_pace_max_segs &&
19173 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) {
19174 /* We hit our max. */
19176 } else if ((rack->rc_user_set_max_segs) &&
19177 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) {
19178 /* We hit the user defined max */
19182 if ((error == 0) && (flags & TH_FIN))
19183 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN);
19184 if (flags & TH_RST) {
19186 * We don't send again after sending a RST.
19191 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
19192 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) {
19194 * Get our pacing rate, if an error
19195 * occurred in sending (ENOBUF) we would
19196		 * hit the else if with slot preset. Other errors return.
19199 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz);
19202 (rsm->r_flags & RACK_HAS_SYN) == 0 &&
19203 rack->use_rack_rr) {
19204		/* It's a retransmit and we use the rack cheat? */
19206 (rack->rc_always_pace == 0) ||
19207 (rack->r_rr_config == 1)) {
19209 * We have no pacing set or we
19210 * are using old-style rack or
19211 * we are overridden to use the old 1ms pacing.
19213 slot = rack->r_ctl.rc_min_to;
19216		/* We have sent, clear the flag */
19217 rack->r_ent_rec_ns = 0;
19218 if (rack->r_must_retran) {
19220 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
19221 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
19223 * We have retransmitted all.
19225 rack->r_must_retran = 0;
19226 rack->r_ctl.rc_out_at_rto = 0;
19228 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
19230 * Sending new data will also kill
19233 rack->r_must_retran = 0;
19234 rack->r_ctl.rc_out_at_rto = 0;
19237 rack->r_ctl.fsb.recwin = recwin;
19238 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) &&
19239 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
19241		 * We hit an RTO and have now passed snd_max at the RTO;
19242 * clear all the WAS flags.
19244 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY);
19247 /* set the rack tcb into the slot N */
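	/*
	 * If nothing out of the ordinary is pending (no SYN/FIN, no SACKs, no
	 * forced retransmits) we can arm the fast-send block (fsb) so the rest
	 * of the socket-buffer data goes out via the fast path.
	 */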
19248 if ((error == 0) &&
19250 ((flags & (TH_SYN|TH_FIN)) == 0) &&
19252 (tp->snd_nxt == tp->snd_max) &&
19254 (tp->rcv_numsacks == 0) &&
19255 rack->r_fsb_inited &&
19256 TCPS_HAVEESTABLISHED(tp->t_state) &&
19257 (rack->r_must_retran == 0) &&
19258 ((tp->t_flags & TF_NEEDFIN) == 0) &&
19259 (len > 0) && (orig_len > 0) &&
19260 (orig_len > len) &&
19261 ((orig_len - len) >= segsiz) &&
19263 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
19264 /* We can send at least one more MSS using our fsb */
19266 rack->r_fast_output = 1;
19267 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
19268 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
19269 rack->r_ctl.fsb.tcp_flags = flags;
19270 rack->r_ctl.fsb.left_to_send = orig_len - len;
19272 rack->r_ctl.fsb.hw_tls = 1;
19274 rack->r_ctl.fsb.hw_tls = 0;
19275 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
19276 ("rack:%p left_to_send:%u sbavail:%u out:%u",
19277 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
19278 (tp->snd_max - tp->snd_una)));
19279 if (rack->r_ctl.fsb.left_to_send < segsiz)
19280 rack->r_fast_output = 0;
19282 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
19283 rack->r_ctl.fsb.rfo_apply_push = 1;
19285 rack->r_ctl.fsb.rfo_apply_push = 0;
19288 rack->r_fast_output = 0;
19289 rack_log_fsb(rack, tp, so, flags,
19290 ipoptlen, orig_len, len, error,
19291 (rsm == NULL), optlen, __LINE__, 2);
19292 } else if (sendalot) {
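		/*
		 * Same fast-path qualification as above; when it holds we hand the
		 * remainder of this send to rack_fast_output() directly.
		 */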
19296 if ((error == 0) &&
19298 ((flags & (TH_SYN|TH_FIN)) == 0) &&
19301 (tp->rcv_numsacks == 0) &&
19302 (tp->snd_nxt == tp->snd_max) &&
19303 (rack->r_must_retran == 0) &&
19304 rack->r_fsb_inited &&
19305 TCPS_HAVEESTABLISHED(tp->t_state) &&
19306 ((tp->t_flags & TF_NEEDFIN) == 0) &&
19307 (len > 0) && (orig_len > 0) &&
19308 (orig_len > len) &&
19309 ((orig_len - len) >= segsiz) &&
19311 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
19312 /* we can use fast_output for more */
19314 rack->r_fast_output = 1;
19315 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
19316 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
19317 rack->r_ctl.fsb.tcp_flags = flags;
19318 rack->r_ctl.fsb.left_to_send = orig_len - len;
19320 rack->r_ctl.fsb.hw_tls = 1;
19322 rack->r_ctl.fsb.hw_tls = 0;
19323 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
19324 ("rack:%p left_to_send:%u sbavail:%u out:%u",
19325 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
19326 (tp->snd_max - tp->snd_una)));
19327 if (rack->r_ctl.fsb.left_to_send < segsiz) {
19328 rack->r_fast_output = 0;
19330 if (rack->r_fast_output) {
19331 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
19332 rack->r_ctl.fsb.rfo_apply_push = 1;
19334 rack->r_ctl.fsb.rfo_apply_push = 0;
19335 rack_log_fsb(rack, tp, so, flags,
19336 ipoptlen, orig_len, len, error,
19337 (rsm == NULL), optlen, __LINE__, 3);
19339 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
19349 /* Assure when we leave that snd_nxt will point to top */
19350 if (SEQ_GT(tp->snd_max, tp->snd_nxt))
19351 tp->snd_nxt = tp->snd_max;
19352 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0);
19353 #ifdef TCP_ACCOUNTING
19354 crtsc = get_cyclecount() - ts_val;
19355 if (tot_len_this_send) {
19356 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19357 tp->tcp_cnt_counters[SND_OUT_DATA]++;
19359 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1);
19360 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19361 tp->tcp_proc_time[SND_OUT_DATA] += crtsc;
19363 counter_u64_add(tcp_proc_time[SND_OUT_DATA], crtsc);
19364 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19365 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz);
19367 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) /segsiz));
19369 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19370 tp->tcp_cnt_counters[SND_OUT_ACK]++;
19372 counter_u64_add(tcp_cnt_counters[SND_OUT_ACK], 1);
19373 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19374 tp->tcp_proc_time[SND_OUT_ACK] += crtsc;
19376 counter_u64_add(tcp_proc_time[SND_OUT_ACK], crtsc);
19380 if (error == ENOBUFS)
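/* rack_update_seg(): re-derive the pacing segment sizes and log when the max changes. */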
19386 rack_update_seg(struct tcp_rack *rack)
19390 orig_val = rack->r_ctl.rc_pace_max_segs;
19391 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
19392 if (orig_val != rack->r_ctl.rc_pace_max_segs)
19393 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0);
19397 rack_mtu_change(struct tcpcb *tp)
19400 * The MSS may have changed
19402 struct tcp_rack *rack;
19403 struct rack_sendmap *rsm;
19405 rack = (struct tcp_rack *)tp->t_fb_ptr;
19406 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) {
19408		 * The MTU has changed; we need to resend everything
19409 * since all we have sent is lost. We first fix
19410 * up the mtu though.
19412 rack_set_pace_segments(tp, rack, __LINE__, NULL);
19413 /* We treat this like a full retransmit timeout without the cwnd adjustment */
19414 rack_remxt_tmr(tp);
19415 rack->r_fast_output = 0;
19416 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp,
19417 rack->r_ctl.rc_sacked);
19418 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
19419 rack->r_must_retran = 1;
19420		/* Mark all inflight as needing to be rxt'd */
19421 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
19422 rsm->r_flags |= RACK_MUST_RXT;
19425 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
19426 /* We don't use snd_nxt to retransmit */
19427 tp->snd_nxt = tp->snd_max;
19431 rack_set_profile(struct tcp_rack *rack, int prof)
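	/*
	 * Apply a canned bundle of pacing, compressed-ack and shared-cwnd settings
	 * selected by prof; profile 0 puts everything back to the sysctl defaults.
	 */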
19435 /* pace_always=1 */
19436 if (rack->rc_always_pace == 0) {
19437 if (tcp_can_enable_pacing() == 0)
19440 rack->rc_always_pace = 1;
19441 if (rack->use_fixed_rate || rack->gp_ready)
19442 rack_set_cc_pacing(rack);
19443 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19444 rack->rack_attempt_hdwr_pace = 0;
19446 if (rack_use_cmp_acks)
19447 rack->r_use_cmp_ack = 1;
19448 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) &&
19449 rack->r_use_cmp_ack)
19450 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
19452 rack->rack_enable_scwnd = 1;
19454 rack->rc_gp_dyn_mul = 1;
19456 rack->r_ctl.rack_per_of_gp_ca = 100;
19458 rack->r_rr_config = 3;
19460 rack->r_ctl.rc_no_push_at_mrtt = 2;
19462 rack->rc_pace_to_cwnd = 1;
19463 rack->rc_pace_fill_if_rttin_range = 0;
19464 rack->rtt_limit_mul = 0;
19466 rack->rack_no_prr = 1;
19468 rack->r_limit_scw = 1;
19470 rack->r_ctl.rack_per_of_gp_rec = 90;
19473 } else if (prof == 3) {
19474		/* Same as profile one except fill_cw becomes 2 (less aggressive set) */
19475 /* pace_always=1 */
19476 if (rack->rc_always_pace == 0) {
19477 if (tcp_can_enable_pacing() == 0)
19480 rack->rc_always_pace = 1;
19481 if (rack->use_fixed_rate || rack->gp_ready)
19482 rack_set_cc_pacing(rack);
19483 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19484 rack->rack_attempt_hdwr_pace = 0;
19486 if (rack_use_cmp_acks)
19487 rack->r_use_cmp_ack = 1;
19488 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) &&
19489 rack->r_use_cmp_ack)
19490 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
19492 rack->rack_enable_scwnd = 1;
19494 rack->rc_gp_dyn_mul = 1;
19496 rack->r_ctl.rack_per_of_gp_ca = 100;
19498 rack->r_rr_config = 3;
19500 rack->r_ctl.rc_no_push_at_mrtt = 2;
19502 rack->rc_pace_to_cwnd = 1;
19503 rack->r_fill_less_agg = 1;
19504 rack->rc_pace_fill_if_rttin_range = 0;
19505 rack->rtt_limit_mul = 0;
19507 rack->rack_no_prr = 1;
19509 rack->r_limit_scw = 1;
19511 rack->r_ctl.rack_per_of_gp_rec = 90;
19515 } else if (prof == 2) {
19517 if (rack->rc_always_pace == 0) {
19518 if (tcp_can_enable_pacing() == 0)
19521 rack->rc_always_pace = 1;
19522 if (rack->use_fixed_rate || rack->gp_ready)
19523 rack_set_cc_pacing(rack);
19524 rack->r_use_cmp_ack = 1;
19525 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state))
19526 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
19527 /* pace_always=1 */
19528 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19530 rack->rack_enable_scwnd = 1;
19532 rack->rc_gp_dyn_mul = 1;
19533 rack->r_ctl.rack_per_of_gp_ca = 100;
19535 rack->r_rr_config = 3;
19537 rack->r_ctl.rc_no_push_at_mrtt = 2;
19539 rack->rc_pace_to_cwnd = 1;
19540 rack->rc_pace_fill_if_rttin_range = 0;
19541 rack->rtt_limit_mul = 0;
19543 rack->rack_no_prr = 1;
19545 rack->r_limit_scw = 0;
19547 } else if (prof == 0) {
19548 /* This changes things back to the default settings */
19550 if (rack->rc_always_pace) {
19551 tcp_decrement_paced_conn();
19552 rack_undo_cc_pacing(rack);
19553 rack->rc_always_pace = 0;
19555 if (rack_pace_every_seg && tcp_can_enable_pacing()) {
19556 rack->rc_always_pace = 1;
19557 if (rack->use_fixed_rate || rack->gp_ready)
19558 rack_set_cc_pacing(rack);
19560 rack->rc_always_pace = 0;
19561 if (rack_dsack_std_based & 0x1) {
19562 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */
19563 rack->rc_rack_tmr_std_based = 1;
19565 if (rack_dsack_std_based & 0x2) {
19566 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */
19567 rack->rc_rack_use_dsack = 1;
19569 if (rack_use_cmp_acks)
19570 rack->r_use_cmp_ack = 1;
19572 rack->r_use_cmp_ack = 0;
19573 if (rack_disable_prr)
19574 rack->rack_no_prr = 1;
19576 rack->rack_no_prr = 0;
19577 if (rack_gp_no_rec_chg)
19578 rack->rc_gp_no_rec_chg = 1;
19580 rack->rc_gp_no_rec_chg = 0;
19581 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) {
19582 rack->r_mbuf_queue = 1;
19583 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state))
19584 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
19585 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19587 rack->r_mbuf_queue = 0;
19588 rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
19590 if (rack_enable_shared_cwnd)
19591 rack->rack_enable_scwnd = 1;
19593 rack->rack_enable_scwnd = 0;
19594 if (rack_do_dyn_mul) {
19595			/* When dynamic adjustment is on, CA needs to start at 100% */
19596 rack->rc_gp_dyn_mul = 1;
19597 if (rack_do_dyn_mul >= 100)
19598 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
19600 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
19601 rack->rc_gp_dyn_mul = 0;
19603 rack->r_rr_config = 0;
19604 rack->r_ctl.rc_no_push_at_mrtt = 0;
19605 rack->rc_pace_to_cwnd = 0;
19606 rack->rc_pace_fill_if_rttin_range = 0;
19607 rack->rtt_limit_mul = 0;
19609 if (rack_enable_hw_pacing)
19610 rack->rack_hdw_pace_ena = 1;
19612 rack->rack_hdw_pace_ena = 0;
19613 if (rack_disable_prr)
19614 rack->rack_no_prr = 1;
19616 rack->rack_no_prr = 0;
19617 if (rack_limits_scwnd)
19618 rack->r_limit_scw = 1;
19620 rack->r_limit_scw = 0;
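/*
 * rack_add_deferred_option(): queue a socket option to be replayed once the
 * connection is ready; signals failure when the list entry cannot be allocated.
 */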
19627 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval)
19629 struct deferred_opt_list *dol;
19631 dol = malloc(sizeof(struct deferred_opt_list),
19632 M_TCPFSB, M_NOWAIT|M_ZERO);
19635 * No space yikes -- fail out..
19639 dol->optname = sopt_name;
19640 dol->optval = loptval;
19641 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next);
19646 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name,
19647 uint32_t optval, uint64_t loptval)
19649 struct epoch_tracker et;
19650 struct sockopt sopt;
19651 struct cc_newreno_opts opt;
19656 switch (sopt_name) {
19658 case TCP_RACK_DSACK_OPT:
19659 RACK_OPTS_INC(tcp_rack_dsack_opt);
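		/* Bit 0 selects srtt-based rack timers; bit 1 lets observed DSACKs stretch those timers. */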
19660 if (optval & 0x1) {
19661 rack->rc_rack_tmr_std_based = 1;
19663 rack->rc_rack_tmr_std_based = 0;
19665 if (optval & 0x2) {
19666 rack->rc_rack_use_dsack = 1;
19668 rack->rc_rack_use_dsack = 0;
19670 rack_log_dsack_event(rack, 5, __LINE__, 0, 0);
19672 case TCP_RACK_PACING_BETA:
19673 RACK_OPTS_INC(tcp_rack_beta);
19674 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
19675 /* This only works for newreno. */
19679 if (rack->rc_pacing_cc_set) {
19681 * Set them into the real CC module
19682			 * what's in the rack pcb are the old values
19683			 * to be used on restoral.
19685 sopt.sopt_dir = SOPT_SET;
19686 opt.name = CC_NEWRENO_BETA;
19688 if (CC_ALGO(tp)->ctl_output != NULL)
19689 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
19696 * Not pacing yet so set it into our local
19697 * rack pcb storage.
19699 rack->r_ctl.rc_saved_beta.beta = optval;
19702 case TCP_RACK_TIMER_SLOP:
19703 RACK_OPTS_INC(tcp_rack_timer_slop);
19704 rack->r_ctl.timer_slop = optval;
19705 if (rack->rc_tp->t_srtt) {
19707			 * If we have an SRTT, let's update t_rxtcur
19708 * to have the new slop.
19710 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
19711 rack_rto_min, rack_rto_max,
19712 rack->r_ctl.timer_slop);
19715 case TCP_RACK_PACING_BETA_ECN:
19716 RACK_OPTS_INC(tcp_rack_beta_ecn);
19717 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
19718 /* This only works for newreno. */
19722 if (rack->rc_pacing_cc_set) {
19724 * Set them into the real CC module
19725			 * what's in the rack pcb are the old values
19726			 * to be used on restoral.
19728 sopt.sopt_dir = SOPT_SET;
19729 opt.name = CC_NEWRENO_BETA_ECN;
19731 if (CC_ALGO(tp)->ctl_output != NULL)
19732 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
19737 * Not pacing yet so set it into our local
19738 * rack pcb storage.
19740 rack->r_ctl.rc_saved_beta.beta_ecn = optval;
19741 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED;
19744 case TCP_DEFER_OPTIONS:
19745 RACK_OPTS_INC(tcp_defer_opt);
19747 if (rack->gp_ready) {
19752 rack->defer_options = 1;
19754 rack->defer_options = 0;
19756 case TCP_RACK_MEASURE_CNT:
19757 RACK_OPTS_INC(tcp_rack_measure_cnt);
19758 if (optval && (optval <= 0xff)) {
19759 rack->r_ctl.req_measurements = optval;
19763 case TCP_REC_ABC_VAL:
19764 RACK_OPTS_INC(tcp_rec_abc_val);
19766 rack->r_use_labc_for_rec = 1;
19768 rack->r_use_labc_for_rec = 0;
19770 case TCP_RACK_ABC_VAL:
19771 RACK_OPTS_INC(tcp_rack_abc_val);
19772 if ((optval > 0) && (optval < 255))
19773 rack->rc_labc = optval;
19777 case TCP_HDWR_UP_ONLY:
19778 RACK_OPTS_INC(tcp_pacing_up_only);
19780 rack->r_up_only = 1;
19782 rack->r_up_only = 0;
19784 case TCP_PACING_RATE_CAP:
19785 RACK_OPTS_INC(tcp_pacing_rate_cap);
19786 rack->r_ctl.bw_rate_cap = loptval;
19788 case TCP_RACK_PROFILE:
19789 RACK_OPTS_INC(tcp_profile);
19790 error = rack_set_profile(rack, optval);
19792 case TCP_USE_CMP_ACKS:
19793 RACK_OPTS_INC(tcp_use_cmp_acks);
19794 if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) {
19795			/* You can't turn it off once it's on! */
19797 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) {
19798 rack->r_use_cmp_ack = 1;
19799 rack->r_mbuf_queue = 1;
19800 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19802 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
19803 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
19805 case TCP_SHARED_CWND_TIME_LIMIT:
19806 RACK_OPTS_INC(tcp_lscwnd);
19808 rack->r_limit_scw = 1;
19810 rack->r_limit_scw = 0;
19812 case TCP_RACK_PACE_TO_FILL:
19813 RACK_OPTS_INC(tcp_fillcw);
19815 rack->rc_pace_to_cwnd = 0;
19817 rack->rc_pace_to_cwnd = 1;
19819 rack->r_fill_less_agg = 1;
19821 if ((optval >= rack_gp_rtt_maxmul) &&
19822 rack_gp_rtt_maxmul &&
19824 rack->rc_pace_fill_if_rttin_range = 1;
19825 rack->rtt_limit_mul = optval;
19827 rack->rc_pace_fill_if_rttin_range = 0;
19828 rack->rtt_limit_mul = 0;
19831 case TCP_RACK_NO_PUSH_AT_MAX:
19832 RACK_OPTS_INC(tcp_npush);
19834 rack->r_ctl.rc_no_push_at_mrtt = 0;
19835 else if (optval < 0xff)
19836 rack->r_ctl.rc_no_push_at_mrtt = optval;
19840 case TCP_SHARED_CWND_ENABLE:
19841 RACK_OPTS_INC(tcp_rack_scwnd);
19843 rack->rack_enable_scwnd = 0;
19845 rack->rack_enable_scwnd = 1;
19847 case TCP_RACK_MBUF_QUEUE:
19848 /* Now do we use the LRO mbuf-queue feature */
19849 RACK_OPTS_INC(tcp_rack_mbufq);
19850 if (optval || rack->r_use_cmp_ack)
19851 rack->r_mbuf_queue = 1;
19853 rack->r_mbuf_queue = 0;
19854 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
19855 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19857 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
19859 case TCP_RACK_NONRXT_CFG_RATE:
19860 RACK_OPTS_INC(tcp_rack_cfg_rate);
19862 rack->rack_rec_nonrxt_use_cr = 0;
19864 rack->rack_rec_nonrxt_use_cr = 1;
19867 RACK_OPTS_INC(tcp_rack_noprr);
19869 rack->rack_no_prr = 0;
19870 else if (optval == 1)
19871 rack->rack_no_prr = 1;
19872 else if (optval == 2)
19873 rack->no_prr_addback = 1;
19877 case TCP_TIMELY_DYN_ADJ:
19878 RACK_OPTS_INC(tcp_timely_dyn);
19880 rack->rc_gp_dyn_mul = 0;
19882 rack->rc_gp_dyn_mul = 1;
19883 if (optval >= 100) {
19885 * If the user sets something 100 or more
19886				 * it's the gp_ca value.
19888 rack->r_ctl.rack_per_of_gp_ca = optval;
19892 case TCP_RACK_DO_DETECTION:
19893 RACK_OPTS_INC(tcp_rack_do_detection);
19895 rack->do_detection = 0;
19897 rack->do_detection = 1;
19899 case TCP_RACK_TLP_USE:
19900 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) {
19904 RACK_OPTS_INC(tcp_tlp_use);
19905 rack->rack_tlp_threshold_use = optval;
19907 case TCP_RACK_TLP_REDUCE:
19908 /* RACK TLP cwnd reduction (bool) */
19909 RACK_OPTS_INC(tcp_rack_tlp_reduce);
19910 rack->r_ctl.rc_tlp_cwnd_reduce = optval;
19912 /* Pacing related ones */
19913 case TCP_RACK_PACE_ALWAYS:
19915 * zero is old rack method, 1 is new
19916 * method using a pacing rate.
19918 RACK_OPTS_INC(tcp_rack_pace_always);
19920 if (rack->rc_always_pace) {
19923 } else if (tcp_can_enable_pacing()) {
19924 rack->rc_always_pace = 1;
19925 if (rack->use_fixed_rate || rack->gp_ready)
19926 rack_set_cc_pacing(rack);
19933 if (rack->rc_always_pace) {
19934 tcp_decrement_paced_conn();
19935 rack->rc_always_pace = 0;
19936 rack_undo_cc_pacing(rack);
19939 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
19940 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19942 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
19943		/* A rate may be set (irate or otherwise); if so, set the seg size */
19944 rack_update_seg(rack);
19946 case TCP_BBR_RACK_INIT_RATE:
19947 RACK_OPTS_INC(tcp_initial_rate);
19949 /* Change from kbits per second to bytes per second */
19952 rack->r_ctl.init_rate = val;
19953 if (rack->rc_init_win != rack_default_init_window) {
19957 * Options don't always get applied
19958 * in the order you think. So in order
19959 * to assure we update a cwnd we need
19960 * to check and see if we are still
19961 * where we should raise the cwnd.
19963 win = rc_init_window(rack);
19964 if (SEQ_GT(tp->snd_max, tp->iss))
19965 snt = tp->snd_max - tp->iss;
19969 (tp->snd_cwnd < win))
19970 tp->snd_cwnd = win;
19972 if (rack->rc_always_pace)
19973 rack_update_seg(rack);
19975 case TCP_BBR_IWINTSO:
19976 RACK_OPTS_INC(tcp_initial_win);
19977 if (optval && (optval <= 0xff)) {
19980 rack->rc_init_win = optval;
19981 win = rc_init_window(rack);
19982 if (SEQ_GT(tp->snd_max, tp->iss))
19983 snt = tp->snd_max - tp->iss;
19988 #ifdef NETFLIX_PEAKRATE
19989 tp->t_maxpeakrate |
19991 rack->r_ctl.init_rate)) {
19993 * We are not past the initial window
19994 * and we have some bases for pacing,
19995 * so we need to possibly adjust up
19996 * the cwnd. Note even if we don't set
19997				 * the cwnd, it's still ok to raise the rc_init_win
19998 * which can be used coming out of idle when we
19999 * would have a rate.
20001 if (tp->snd_cwnd < win)
20002 tp->snd_cwnd = win;
20004 if (rack->rc_always_pace)
20005 rack_update_seg(rack);
20009 case TCP_RACK_FORCE_MSEG:
20010 RACK_OPTS_INC(tcp_rack_force_max_seg);
20012 rack->rc_force_max_seg = 1;
20014 rack->rc_force_max_seg = 0;
20016 case TCP_RACK_PACE_MAX_SEG:
20017 /* Max segments size in a pace in bytes */
20018 RACK_OPTS_INC(tcp_rack_max_seg);
20019 rack->rc_user_set_max_segs = optval;
20020 rack_set_pace_segments(tp, rack, __LINE__, NULL);
20022 case TCP_RACK_PACE_RATE_REC:
20023		/* Set the fixed pacing rate in Bytes per second for recovery */
20024 RACK_OPTS_INC(tcp_rack_pace_rate_rec);
20025 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
20026 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
20027 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
20028 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
20029 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
20030 rack->use_fixed_rate = 1;
20031 if (rack->rc_always_pace)
20032 rack_set_cc_pacing(rack);
20033 rack_log_pacing_delay_calc(rack,
20034 rack->r_ctl.rc_fixed_pacing_rate_ss,
20035 rack->r_ctl.rc_fixed_pacing_rate_ca,
20036 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
20040 case TCP_RACK_PACE_RATE_SS:
20041		/* Set the fixed pacing rate in Bytes per second for slow start */
20042 RACK_OPTS_INC(tcp_rack_pace_rate_ss);
20043 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
20044 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
20045 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
20046 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
20047 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
20048 rack->use_fixed_rate = 1;
20049 if (rack->rc_always_pace)
20050 rack_set_cc_pacing(rack);
20051 rack_log_pacing_delay_calc(rack,
20052 rack->r_ctl.rc_fixed_pacing_rate_ss,
20053 rack->r_ctl.rc_fixed_pacing_rate_ca,
20054 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
20055 __LINE__, NULL, 0);
20058 case TCP_RACK_PACE_RATE_CA:
20059 /* Set the fixed pacing rate in Bytes per second ca */
20060 RACK_OPTS_INC(tcp_rack_pace_rate_ca);
20061 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
20062 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
20063 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
20064 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
20065 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
20066 rack->use_fixed_rate = 1;
20067 if (rack->rc_always_pace)
20068 rack_set_cc_pacing(rack);
20069 rack_log_pacing_delay_calc(rack,
20070 rack->r_ctl.rc_fixed_pacing_rate_ss,
20071 rack->r_ctl.rc_fixed_pacing_rate_ca,
20072 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
20073 __LINE__, NULL, 0);
20075 case TCP_RACK_GP_INCREASE_REC:
20076 RACK_OPTS_INC(tcp_gp_inc_rec);
20077 rack->r_ctl.rack_per_of_gp_rec = optval;
20078 rack_log_pacing_delay_calc(rack,
20079 rack->r_ctl.rack_per_of_gp_ss,
20080 rack->r_ctl.rack_per_of_gp_ca,
20081 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
20082 __LINE__, NULL, 0);
20084 case TCP_RACK_GP_INCREASE_CA:
20085 RACK_OPTS_INC(tcp_gp_inc_ca);
20089 * We don't allow any reduction
20095 rack->r_ctl.rack_per_of_gp_ca = ca;
20096 rack_log_pacing_delay_calc(rack,
20097 rack->r_ctl.rack_per_of_gp_ss,
20098 rack->r_ctl.rack_per_of_gp_ca,
20099 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
20100 __LINE__, NULL, 0);
20102 case TCP_RACK_GP_INCREASE_SS:
20103 RACK_OPTS_INC(tcp_gp_inc_ss);
20107 * We don't allow any reduction
20113 rack->r_ctl.rack_per_of_gp_ss = ss;
20114 rack_log_pacing_delay_calc(rack,
20115 rack->r_ctl.rack_per_of_gp_ss,
20116 rack->r_ctl.rack_per_of_gp_ca,
20117 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
20118 __LINE__, NULL, 0);
20120 case TCP_RACK_RR_CONF:
20121 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate);
20122 if (optval && optval <= 3)
20123 rack->r_rr_config = optval;
20125 rack->r_rr_config = 0;
20127 case TCP_HDWR_RATE_CAP:
20128 RACK_OPTS_INC(tcp_hdwr_rate_cap);
20130 if (rack->r_rack_hw_rate_caps == 0)
20131 rack->r_rack_hw_rate_caps = 1;
20135 rack->r_rack_hw_rate_caps = 0;
20138 case TCP_BBR_HDWR_PACE:
20139 RACK_OPTS_INC(tcp_hdwr_pacing);
20141 if (rack->rack_hdrw_pacing == 0) {
20142 rack->rack_hdw_pace_ena = 1;
20143 rack->rack_attempt_hdwr_pace = 0;
20147 rack->rack_hdw_pace_ena = 0;
20149 if (rack->r_ctl.crte != NULL) {
20150 rack->rack_hdrw_pacing = 0;
20151 rack->rack_attempt_hdwr_pace = 0;
20152 tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
20153 rack->r_ctl.crte = NULL;
20158 /* End Pacing related ones */
20159 case TCP_RACK_PRR_SENDALOT:
20160 /* Allow PRR to send more than one seg */
20161 RACK_OPTS_INC(tcp_rack_prr_sendalot);
20162 rack->r_ctl.rc_prr_sendalot = optval;
20164 case TCP_RACK_MIN_TO:
20165 /* Minimum time between rack t-o's in ms */
20166 RACK_OPTS_INC(tcp_rack_min_to);
20167 rack->r_ctl.rc_min_to = optval;
20169 case TCP_RACK_EARLY_SEG:
20170 /* If early recovery max segments */
20171 RACK_OPTS_INC(tcp_rack_early_seg);
20172 rack->r_ctl.rc_early_recovery_segs = optval;
20174 case TCP_RACK_ENABLE_HYSTART:
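		/*
		 * Enabling this allows hystart++; the rack_do_hystart sysctl decides whether
		 * it may also shrink the cwnd and use the conservative ssthresh variant.
		 */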
20177 tp->ccv->flags |= CCF_HYSTART_ALLOWED;
20178 if (rack_do_hystart > RACK_HYSTART_ON)
20179 tp->ccv->flags |= CCF_HYSTART_CAN_SH_CWND;
20180 if (rack_do_hystart > RACK_HYSTART_ON_W_SC)
20181 tp->ccv->flags |= CCF_HYSTART_CONS_SSTH;
20183 tp->ccv->flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH);
20187 case TCP_RACK_REORD_THRESH:
20188 /* RACK reorder threshold (shift amount) */
20189 RACK_OPTS_INC(tcp_rack_reord_thresh);
20190 if ((optval > 0) && (optval < 31))
20191 rack->r_ctl.rc_reorder_shift = optval;
20195 case TCP_RACK_REORD_FADE:
20196 /* Does reordering fade after ms time */
20197 RACK_OPTS_INC(tcp_rack_reord_fade);
20198 rack->r_ctl.rc_reorder_fade = optval;
20200 case TCP_RACK_TLP_THRESH:
20201		/* RACK TLP threshold i.e. srtt+(srtt/N) */
20202 RACK_OPTS_INC(tcp_rack_tlp_thresh);
20204 rack->r_ctl.rc_tlp_threshold = optval;
20208 case TCP_BBR_USE_RACK_RR:
20209 RACK_OPTS_INC(tcp_rack_rr);
20211 rack->use_rack_rr = 1;
20213 rack->use_rack_rr = 0;
20215 case TCP_FAST_RSM_HACK:
20216 RACK_OPTS_INC(tcp_rack_fastrsm_hack);
20218 rack->fast_rsm_hack = 1;
20220 rack->fast_rsm_hack = 0;
20222 case TCP_RACK_PKT_DELAY:
20223 /* RACK added ms i.e. rack-rtt + reord + N */
20224 RACK_OPTS_INC(tcp_rack_pkt_delay);
20225 rack->r_ctl.rc_pkt_delay = optval;
20228 RACK_OPTS_INC(tcp_rack_delayed_ack);
20230 tp->t_delayed_ack = 0;
20232 tp->t_delayed_ack = 1;
20233 if (tp->t_flags & TF_DELACK) {
20234 tp->t_flags &= ~TF_DELACK;
20235 tp->t_flags |= TF_ACKNOW;
20236 NET_EPOCH_ENTER(et);
20238 NET_EPOCH_EXIT(et);
20242 case TCP_BBR_RACK_RTT_USE:
20243 RACK_OPTS_INC(tcp_rack_rtt_use);
20244 if ((optval != USE_RTT_HIGH) &&
20245 (optval != USE_RTT_LOW) &&
20246 (optval != USE_RTT_AVG))
20249 rack->r_ctl.rc_rate_sample_method = optval;
20251 case TCP_DATA_AFTER_CLOSE:
20252 RACK_OPTS_INC(tcp_data_after_close);
20254 rack->rc_allow_data_af_clo = 1;
20256 rack->rc_allow_data_af_clo = 0;
20261 #ifdef NETFLIX_STATS
20262 tcp_log_socket_option(tp, sopt_name, optval, error);
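/*
 * rack_apply_deferred_options(): replay, in order, every option that was
 * deferred at set time and free the list entries as we go.
 */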
20269 rack_apply_deferred_options(struct tcp_rack *rack)
20271 struct deferred_opt_list *dol, *sdol;
20274 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) {
20275 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
20276		/* Disadvantage of deferral is you lose the error return */
20277 s_optval = (uint32_t)dol->optval;
20278 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval);
20279 free(dol, M_TCPDO);
20284 rack_hw_tls_change(struct tcpcb *tp, int chg)
20287	 * HW tls state has changed.. fix all the sendmap entries and the fsb to match.
20290 struct tcp_rack *rack;
20291 struct rack_sendmap *rsm;
20293 rack = (struct tcp_rack *)tp->t_fb_ptr;
20294 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
20301 rack->r_ctl.fsb.hw_tls = 1;
20303 rack->r_ctl.fsb.hw_tls = 0;
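/* rack_pru_options(): RACK does not support urgent (out-of-band) data. */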
20307 rack_pru_options(struct tcpcb *tp, int flags)
20309 if (flags & PRUS_OOB)
20310 return (EOPNOTSUPP);
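/* Function table handed to the TCP stack-switch framework when this module registers. */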
20314 static struct tcp_function_block __tcp_rack = {
20315 .tfb_tcp_block_name = __XSTRING(STACKNAME),
20316 .tfb_tcp_output = rack_output,
20317 .tfb_do_queued_segments = ctf_do_queued_segments,
20318 .tfb_do_segment_nounlock = rack_do_segment_nounlock,
20319 .tfb_tcp_do_segment = rack_do_segment,
20320 .tfb_tcp_ctloutput = rack_ctloutput,
20321 .tfb_tcp_fb_init = rack_init,
20322 .tfb_tcp_fb_fini = rack_fini,
20323 .tfb_tcp_timer_stop_all = rack_stopall,
20324 .tfb_tcp_timer_activate = rack_timer_activate,
20325 .tfb_tcp_timer_active = rack_timer_active,
20326 .tfb_tcp_timer_stop = rack_timer_stop,
20327 .tfb_tcp_rexmit_tmr = rack_remxt_tmr,
20328 .tfb_tcp_handoff_ok = rack_handoff_ok,
20329 .tfb_tcp_mtu_chg = rack_mtu_change,
20330 .tfb_pru_options = rack_pru_options,
20331 .tfb_hwtls_change = rack_hw_tls_change,
20332 .tfb_compute_pipe = rack_compute_pipe,
20333 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP,
20337 * rack_ctloutput() must drop the inpcb lock before performing copyin on
20338 * socket option arguments. When it re-acquires the lock after the copy, it
20339 * has to revalidate that the connection is still valid for the socket option.
20343 rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt)
20346 struct ip6_hdr *ip6;
20352 struct tcp_rack *rack;
20354 int32_t error = 0, optval;
20356 tp = intotcpcb(inp);
20357 rack = (struct tcp_rack *)tp->t_fb_ptr;
20358 if (rack == NULL) {
20363 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
20366 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
20369 switch (sopt->sopt_level) {
20372 MPASS(inp->inp_vflag & INP_IPV6PROTO);
20373 switch (sopt->sopt_name) {
20374 case IPV6_USE_MIN_MTU:
20375 tcp6_use_min_mtu(tp);
20379 * The DSCP codepoint has changed, update the fsb.
20381 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
20382 (rack->rc_inp->inp_flow & IPV6_FLOWINFO_MASK);
20390 switch (sopt->sopt_name) {
20393 * The DSCP codepoint has changed, update the fsb.
20395 ip->ip_tos = rack->rc_inp->inp_ip_tos;
20399 * The TTL has changed, update the fsb.
20401 ip->ip_ttl = rack->rc_inp->inp_ip_ttl;
20409 switch (sopt->sopt_name) {
20410 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */
20411 /* Pacing related ones */
20412 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */
20413 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */
20414 case TCP_BBR_IWINTSO: /* URL:tso_iwin */
20415 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */
20416 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */
20417 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */
20418 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/
20419 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */
20420 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */
20421 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */
20422 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */
20423 case TCP_RACK_RR_CONF: /* URL:rrr_conf */
20424 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */
20425 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */
20426 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */
20427 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */
20428 /* End pacing related */
20429 case TCP_FAST_RSM_HACK: /* URL:frsm_hack */
20430 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */
20431 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */
20432 case TCP_RACK_MIN_TO: /* URL:min_to */
20433 case TCP_RACK_EARLY_SEG: /* URL:early_seg */
20434 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */
20435 case TCP_RACK_REORD_FADE: /* URL:reord_fade */
20436 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */
20437 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */
20438 case TCP_RACK_TLP_USE: /* URL:tlp_use */
20439 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */
20440 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */
20441 case TCP_RACK_DO_DETECTION: /* URL:detect */
20442 case TCP_NO_PRR: /* URL:noprr */
20443 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */
20444 case TCP_DATA_AFTER_CLOSE: /* no URL */
20445 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */
20446 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */
20447 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */
20448 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */
20449 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */
20450 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */
20451 case TCP_RACK_PROFILE: /* URL:profile */
20452 case TCP_USE_CMP_ACKS: /* URL:cmpack */
20453 case TCP_RACK_ABC_VAL: /* URL:labc */
20454 case TCP_REC_ABC_VAL: /* URL:reclabc */
20455 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */
20456 case TCP_DEFER_OPTIONS: /* URL:defer */
20457 case TCP_RACK_DSACK_OPT: /* URL:dsack */
20458 case TCP_RACK_PACING_BETA: /* URL:pacing_beta */
20459 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */
20460 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */
20461 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */
20464 /* Filter off all unknown options to the base stack */
20465 return (tcp_default_ctloutput(inp, sopt));
20469 if (sopt->sopt_name == TCP_PACING_RATE_CAP) {
20470 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval));
20472		 * We truncate it down to 32 bits for the socket-option trace; this
20473		 * means rates > 34Gbps won't show right, but that's probably ok.
20475 optval = (uint32_t)loptval;
20477 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
20478 /* Save it in 64 bit form too */
20484 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
20486 return (ECONNRESET);
20488 if (tp->t_fb != &__tcp_rack) {
20490 return (ENOPROTOOPT);
20492 if (rack->defer_options && (rack->gp_ready == 0) &&
20493 (sopt->sopt_name != TCP_DEFER_OPTIONS) &&
20494 (sopt->sopt_name != TCP_RACK_PACING_BETA) &&
20495 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) &&
20496 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) {
20497		/* Options are being deferred */
20498 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) {
20502 /* No memory to defer, fail */
20507 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval);
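/* rack_fill_info(): populate a tcp_info snapshot for the getsockopt path. */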
20513 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti)
20516 INP_WLOCK_ASSERT(tp->t_inpcb);
20517 bzero(ti, sizeof(*ti));
20519 ti->tcpi_state = tp->t_state;
20520 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
20521 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
20522 if (tp->t_flags & TF_SACK_PERMIT)
20523 ti->tcpi_options |= TCPI_OPT_SACK;
20524 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
20525 ti->tcpi_options |= TCPI_OPT_WSCALE;
20526 ti->tcpi_snd_wscale = tp->snd_scale;
20527 ti->tcpi_rcv_wscale = tp->rcv_scale;
20529 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))
20530 ti->tcpi_options |= TCPI_OPT_ECN;
20531 if (tp->t_flags & TF_FASTOPEN)
20532 ti->tcpi_options |= TCPI_OPT_TFO;
20533	/* t_rcvtime is still kept in ticks */
20534 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
20535 /* Since we hold everything in precise useconds this is easy */
20536 ti->tcpi_rtt = tp->t_srtt;
20537 ti->tcpi_rttvar = tp->t_rttvar;
20538 ti->tcpi_rto = tp->t_rxtcur;
20539 ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
20540 ti->tcpi_snd_cwnd = tp->snd_cwnd;
20542 * FreeBSD-specific extension fields for tcp_info.
20544 ti->tcpi_rcv_space = tp->rcv_wnd;
20545 ti->tcpi_rcv_nxt = tp->rcv_nxt;
20546 ti->tcpi_snd_wnd = tp->snd_wnd;
20547 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. */
20548 ti->tcpi_snd_nxt = tp->snd_nxt;
20549 ti->tcpi_snd_mss = tp->t_maxseg;
20550 ti->tcpi_rcv_mss = tp->t_maxseg;
20551 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
20552 ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
20553 ti->tcpi_snd_zerowin = tp->t_sndzerowin;
20554 #ifdef NETFLIX_STATS
20555 ti->tcpi_total_tlp = tp->t_sndtlppack;
20556 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
20557 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
20560 if (tp->t_flags & TF_TOE) {
20561 ti->tcpi_options |= TCPI_OPT_TOE;
20562 tcp_offload_tcp_info(tp, ti);
20568 rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt)
20571 struct tcp_rack *rack;
20572 int32_t error, optval;
20573 uint64_t val, loptval;
20574 struct tcp_info ti;
20576 * Because all our options are either boolean or an int, we can just
20577 * pull everything into optval and then unlock and copy. If we ever
20578	 * add an option that is not an int, then this will have quite an
20579	 * impact on this routine.
20582 tp = intotcpcb(inp);
20583 rack = (struct tcp_rack *)tp->t_fb_ptr;
20584 if (rack == NULL) {
20588 switch (sopt->sopt_name) {
20590 /* First get the info filled */
20591 rack_fill_info(tp, &ti);
20592 /* Fix up the rtt related fields if needed */
20594 error = sooptcopyout(sopt, &ti, sizeof ti);
20597 * Beta is the congestion control value for NewReno that influences how
20598 * much of a backoff happens when loss is detected. It is normally set
20599 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value
20600 * when you exit recovery.
20602 case TCP_RACK_PACING_BETA:
20603 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
20605 else if (rack->rc_pacing_cc_set == 0)
20606 optval = rack->r_ctl.rc_saved_beta.beta;
20609 * Reach out into the CC data and report back what
20610 * I have previously set. Yeah it looks hackish but
20611 * we don't want to report the saved values.
20613 if (tp->ccv->cc_data)
20614 optval = ((struct newreno *)tp->ccv->cc_data)->beta;
20620 * Beta_ecn is the congestion control value for NewReno that influences how
20621 * much of a backoff happens when a ECN mark is detected. It is normally set
20622 * to 80 for 80% i.e. the cwnd is reduced by 20% of its previous value when
20623	 * you exit recovery. Note that classic ECN has a beta of 50; it is only
20624	 * ABE ECN that uses this "less" value, but we do too with pacing :)
20627 case TCP_RACK_PACING_BETA_ECN:
20628 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
20630 else if (rack->rc_pacing_cc_set == 0)
20631 optval = rack->r_ctl.rc_saved_beta.beta_ecn;
20634 * Reach out into the CC data and report back what
20635 * I have previously set. Yeah it looks hackish but
20636 * we don't want to report the saved values.
20638 if (tp->ccv->cc_data)
20639 optval = ((struct newreno *)tp->ccv->cc_data)->beta_ecn;
20644 case TCP_RACK_DSACK_OPT:
20646 if (rack->rc_rack_tmr_std_based) {
20649 if (rack->rc_rack_use_dsack) {
20653 case TCP_RACK_ENABLE_HYSTART:
20655 if (tp->ccv->flags & CCF_HYSTART_ALLOWED) {
20656 optval = RACK_HYSTART_ON;
20657 if (tp->ccv->flags & CCF_HYSTART_CAN_SH_CWND)
20658 optval = RACK_HYSTART_ON_W_SC;
20659 if (tp->ccv->flags & CCF_HYSTART_CONS_SSTH)
20660 optval = RACK_HYSTART_ON_W_SC_C;
20662 optval = RACK_HYSTART_OFF;
20666 case TCP_FAST_RSM_HACK:
20667 optval = rack->fast_rsm_hack;
20669 case TCP_DEFER_OPTIONS:
20670 optval = rack->defer_options;
20672 case TCP_RACK_MEASURE_CNT:
20673 optval = rack->r_ctl.req_measurements;
20675 case TCP_REC_ABC_VAL:
20676 optval = rack->r_use_labc_for_rec;
20678 case TCP_RACK_ABC_VAL:
20679 optval = rack->rc_labc;
20681 case TCP_HDWR_UP_ONLY:
20682 optval= rack->r_up_only;
20684 case TCP_PACING_RATE_CAP:
20685 loptval = rack->r_ctl.bw_rate_cap;
20687 case TCP_RACK_PROFILE:
20688		/* You cannot retrieve a profile, it's write only */
20691 case TCP_USE_CMP_ACKS:
20692 optval = rack->r_use_cmp_ack;
20694 case TCP_RACK_PACE_TO_FILL:
20695 optval = rack->rc_pace_to_cwnd;
20696 if (optval && rack->r_fill_less_agg)
20699 case TCP_RACK_NO_PUSH_AT_MAX:
20700 optval = rack->r_ctl.rc_no_push_at_mrtt;
20702 case TCP_SHARED_CWND_ENABLE:
20703 optval = rack->rack_enable_scwnd;
20705 case TCP_RACK_NONRXT_CFG_RATE:
20706 optval = rack->rack_rec_nonrxt_use_cr;
20709 if (rack->rack_no_prr == 1)
20711 else if (rack->no_prr_addback == 1)
20716 case TCP_RACK_DO_DETECTION:
20717 optval = rack->do_detection;
20719 case TCP_RACK_MBUF_QUEUE:
20720 /* Now do we use the LRO mbuf-queue feature */
20721 optval = rack->r_mbuf_queue;
20723 case TCP_TIMELY_DYN_ADJ:
20724 optval = rack->rc_gp_dyn_mul;
20726 case TCP_BBR_IWINTSO:
20727 optval = rack->rc_init_win;
20729 case TCP_RACK_TLP_REDUCE:
20730 /* RACK TLP cwnd reduction (bool) */
20731 optval = rack->r_ctl.rc_tlp_cwnd_reduce;
20733 case TCP_BBR_RACK_INIT_RATE:
20734 val = rack->r_ctl.init_rate;
20735 /* convert to kbits per sec */
20738 optval = (uint32_t)val;
20740 case TCP_RACK_FORCE_MSEG:
20741 optval = rack->rc_force_max_seg;
20743 case TCP_RACK_PACE_MAX_SEG:
20744 /* Max segments in a pace */
20745 optval = rack->rc_user_set_max_segs;
20747 case TCP_RACK_PACE_ALWAYS:
20748 /* Use the always pace method */
20749 optval = rack->rc_always_pace;
20751 case TCP_RACK_PRR_SENDALOT:
20752 /* Allow PRR to send more than one seg */
20753 optval = rack->r_ctl.rc_prr_sendalot;
20755 case TCP_RACK_MIN_TO:
20756 /* Minimum time between rack t-o's in ms */
20757 optval = rack->r_ctl.rc_min_to;
20759 case TCP_RACK_EARLY_SEG:
20760 /* If early recovery max segments */
20761 optval = rack->r_ctl.rc_early_recovery_segs;
20763 case TCP_RACK_REORD_THRESH:
20764 /* RACK reorder threshold (shift amount) */
20765 optval = rack->r_ctl.rc_reorder_shift;
20767 case TCP_RACK_REORD_FADE:
20768 /* Does reordering fade after ms time */
20769 optval = rack->r_ctl.rc_reorder_fade;
20771 case TCP_BBR_USE_RACK_RR:
20772 /* Do we use the rack cheat for rxt */
20773 optval = rack->use_rack_rr;
20775 case TCP_RACK_RR_CONF:
20776 optval = rack->r_rr_config;
20778 case TCP_HDWR_RATE_CAP:
20779 optval = rack->r_rack_hw_rate_caps;
20781 case TCP_BBR_HDWR_PACE:
20782 optval = rack->rack_hdw_pace_ena;
20784 case TCP_RACK_TLP_THRESH:
20785		/* RACK TLP threshold i.e. srtt+(srtt/N) */
20786 optval = rack->r_ctl.rc_tlp_threshold;
20788 case TCP_RACK_PKT_DELAY:
20789 /* RACK added ms i.e. rack-rtt + reord + N */
20790 optval = rack->r_ctl.rc_pkt_delay;
20792 case TCP_RACK_TLP_USE:
20793 optval = rack->rack_tlp_threshold_use;
20795 case TCP_RACK_PACE_RATE_CA:
20796 optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
20798 case TCP_RACK_PACE_RATE_SS:
20799 optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
20801 case TCP_RACK_PACE_RATE_REC:
20802 optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
20804 case TCP_RACK_GP_INCREASE_SS:
20805		optval = rack->r_ctl.rack_per_of_gp_ss;
20807 case TCP_RACK_GP_INCREASE_CA:
20808		optval = rack->r_ctl.rack_per_of_gp_ca;
20810 case TCP_BBR_RACK_RTT_USE:
20811 optval = rack->r_ctl.rc_rate_sample_method;
20814 optval = tp->t_delayed_ack;
20816 case TCP_DATA_AFTER_CLOSE:
20817 optval = rack->rc_allow_data_af_clo;
20819 case TCP_SHARED_CWND_TIME_LIMIT:
20820 optval = rack->r_limit_scw;
20822 case TCP_RACK_TIMER_SLOP:
20823 optval = rack->r_ctl.timer_slop;
20826 return (tcp_default_ctloutput(inp, sopt));
20831	if (sopt->sopt_name == TCP_PACING_RATE_CAP)
20832 error = sooptcopyout(sopt, &loptval, sizeof loptval);
20834 error = sooptcopyout(sopt, &optval, sizeof optval);
20840 rack_ctloutput(struct inpcb *inp, struct sockopt *sopt)
20842 if (sopt->sopt_dir == SOPT_SET) {
20843 return (rack_set_sockopt(inp, sopt));
20844 } else if (sopt->sopt_dir == SOPT_GET) {
20845 return (rack_get_sockopt(inp, sopt));
20847	panic("%s: sopt_dir %d", __func__, sopt->sopt_dir);
20851 static const char *rack_stack_names[] = {
20852 __XSTRING(STACKNAME),
20854 __XSTRING(STACKALIAS),
20859 rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
20861 memset(mem, 0, size);
20866 rack_dtor(void *mem, int32_t size, void *arg)
20871 static bool rack_mod_inited = false;
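/*
 * Module event handler: on load create the UMA zones and sysctl tree and
 * register the stack names; on unload deregister and tear everything down.
 */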
20874 tcp_addrack(module_t mod, int32_t type, void *data)
20881 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
20882 sizeof(struct rack_sendmap),
20883 rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
20885 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
20886 sizeof(struct tcp_rack),
20887 rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
20889 sysctl_ctx_init(&rack_sysctl_ctx);
20890 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
20891 SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
20894 __XSTRING(STACKALIAS),
20896 __XSTRING(STACKNAME),
20898 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
20900 if (rack_sysctl_root == NULL) {
20901 printf("Failed to add sysctl node\n");
20905 rack_init_sysctls();
20906 num_stacks = nitems(rack_stack_names);
20907 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
20908 rack_stack_names, &num_stacks);
20910 printf("Failed to register %s stack name for "
20911 "%s module\n", rack_stack_names[num_stacks],
20912 __XSTRING(MODNAME));
20913 sysctl_ctx_free(&rack_sysctl_ctx);
20915 uma_zdestroy(rack_zone);
20916 uma_zdestroy(rack_pcb_zone);
20917 rack_counter_destroy();
20918 printf("Failed to register rack module -- err:%d\n", err);
20921 tcp_lro_reg_mbufq();
20922 rack_mod_inited = true;
20925 err = deregister_tcp_functions(&__tcp_rack, true, false);
20928 err = deregister_tcp_functions(&__tcp_rack, false, true);
20931 if (rack_mod_inited) {
20932 uma_zdestroy(rack_zone);
20933 uma_zdestroy(rack_pcb_zone);
20934 sysctl_ctx_free(&rack_sysctl_ctx);
20935 rack_counter_destroy();
20936 rack_mod_inited = false;
20938 tcp_lro_dereg_mbufq();
20942 return (EOPNOTSUPP);
20947 static moduledata_t tcp_rack = {
20948 .name = __XSTRING(MODNAME),
20949 .evhand = tcp_addrack,
20953 MODULE_VERSION(MODNAME, 1);
20954 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
20955 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);