2 * Copyright (c) 2016-2020 Netflix, Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
31 #include "opt_inet6.h"
32 #include "opt_ipsec.h"
33 #include "opt_ratelimit.h"
34 #include "opt_kern_tls.h"
35 #if defined(INET) || defined(INET6)
36 #include <sys/param.h>
38 #include <sys/module.h>
39 #include <sys/kernel.h>
41 #include <sys/hhook.h>
44 #include <sys/malloc.h>
46 #include <sys/mutex.h>
48 #include <sys/proc.h> /* for proc0 declaration */
49 #include <sys/socket.h>
50 #include <sys/socketvar.h>
51 #include <sys/sysctl.h>
52 #include <sys/systm.h>
54 #include <sys/qmath.h>
56 #include <sys/stats.h> /* Must come after qmath.h and tree.h */
60 #include <sys/refcount.h>
61 #include <sys/queue.h>
62 #include <sys/tim_filter.h>
64 #include <sys/kthread.h>
65 #include <sys/kern_prefetch.h>
66 #include <sys/protosw.h>
68 #include <sys/sched.h>
69 #include <machine/cpu.h>
73 #include <net/route.h>
74 #include <net/route/nhop.h>
77 #define TCPSTATES /* for logging */
79 #include <netinet/in.h>
80 #include <netinet/in_kdtrace.h>
81 #include <netinet/in_pcb.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
84 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
85 #include <netinet/ip_var.h>
86 #include <netinet/ip6.h>
87 #include <netinet6/in6_pcb.h>
88 #include <netinet6/ip6_var.h>
89 #include <netinet/tcp.h>
91 #include <netinet/tcp_fsm.h>
92 #include <netinet/tcp_seq.h>
93 #include <netinet/tcp_timer.h>
94 #include <netinet/tcp_var.h>
95 #include <netinet/tcp_log_buf.h>
96 #include <netinet/tcp_syncache.h>
97 #include <netinet/tcp_hpts.h>
98 #include <netinet/tcp_ratelimit.h>
99 #include <netinet/tcp_accounting.h>
100 #include <netinet/tcpip.h>
101 #include <netinet/cc/cc.h>
102 #include <netinet/cc/cc_newreno.h>
103 #include <netinet/tcp_fastopen.h>
104 #include <netinet/tcp_lro.h>
105 #ifdef NETFLIX_SHARED_CWND
106 #include <netinet/tcp_shared_cwnd.h>
109 #include <netinet/tcp_offload.h>
112 #include <netinet6/tcp6_var.h>
114 #include <netinet/tcp_ecn.h>
116 #include <netipsec/ipsec_support.h>
118 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
119 #include <netipsec/ipsec.h>
120 #include <netipsec/ipsec6.h>
123 #include <netinet/udp.h>
124 #include <netinet/udp_var.h>
125 #include <machine/in_cksum.h>
128 #include <security/mac/mac_framework.h>
130 #include "sack_filter.h"
131 #include "tcp_rack.h"
132 #include "rack_bbr_common.h"
134 uma_zone_t rack_zone;
135 uma_zone_t rack_pcb_zone;
138 #define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t)))
141 VNET_DECLARE(uint32_t, newreno_beta);
142 VNET_DECLARE(uint32_t, newreno_beta_ecn);
143 #define V_newreno_beta VNET(newreno_beta)
144 #define V_newreno_beta_ecn VNET(newreno_beta_ecn)
147 MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block");
148 MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options");
150 struct sysctl_ctx_list rack_sysctl_ctx;
151 struct sysctl_oid *rack_sysctl_root;
157 * The RACK module incorporates a number of
158 * TCP ideas that have been put out into the IETF
159 * over the last few years:
160 * - Matt Mathis's Rate Halving which slowly drops
161 * the congestion window so that the ack clock can
162 * be maintained during a recovery.
163 * - Yuchung Cheng's RACK TCP (for which it is named) that
164 * will stop us using the number of dup acks and instead
165 * use time as the gauge of when we retransmit.
166 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
167 * of Dukkipati et al.
168 * RACK depends on SACK, so if an endpoint arrives that
169 * cannot do SACK the state machine below will shuttle the
170 * connection back to using the "default" TCP stack that is
173 * To implement RACK the original TCP stack was first decomposed
174 * into a functional state machine with individual states
175 * for each of the possible TCP connection states. The do_segment
176 * function's role in life is to mandate the connection supports SACK
177 * initially and then ensure that the RACK state matches the connection
178 * state before calling the state's do_segment function. Each
179 * state is simplified due to the fact that the original do_segment
180 * has been decomposed and we *know* what state we are in (no
181 * switches on the state) and all tests for SACK are gone. This
182 * greatly simplifies what each state does.
184 * TCP output is also overwritten with a new version since it
185 * must maintain the new rack scoreboard.
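/*
 * A rough, illustrative sketch of that dispatch (not the literal code;
 * the handler-pointer name below is only an illustration, while the
 * rack_do_* per-state handlers are the prototypes declared later in
 * this file):
 *
 *	rack_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos)
 *	{
 *		// hand the connection back to the default stack if the
 *		// peer did not negotiate SACK (unless overridden)
 *		rack_set_state(tp, rack);	// keep RACK state in sync
 *		(*state_handler)(m, th, so, tp, &to, drop_hdrlen, tlen,
 *		    tiwin, thflags, nxt_pkt, iptos);	// e.g. rack_do_established
 *	}
 */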
188 static int32_t rack_tlp_thresh = 1;
189 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */
190 static int32_t rack_tlp_use_greater = 1;
191 static int32_t rack_reorder_thresh = 2;
192 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000 (60 seconds) */
194 static uint8_t rack_req_measurements = 1;
195 /* Attack threshold detections */
196 static uint32_t rack_highest_sack_thresh_seen = 0;
197 static uint32_t rack_highest_move_thresh_seen = 0;
198 static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */
199 static int32_t rack_hw_pace_extra_slots = 2; /* 2 extra MSS time betweens */
200 static int32_t rack_hw_rate_caps = 1; /* 1; */
201 static int32_t rack_hw_rate_min = 0; /* 1500000;*/
202 static int32_t rack_hw_rate_to_low = 0; /* 1200000; */
203 static int32_t rack_hw_up_only = 1;
204 static int32_t rack_stats_gets_ms_rtt = 1;
205 static int32_t rack_prr_addbackmax = 2;
206 static int32_t rack_do_hystart = 0;
207 static int32_t rack_apply_rtt_with_reduced_conf = 0;
209 static int32_t rack_pkt_delay = 1000;
210 static int32_t rack_send_a_lot_in_prr = 1;
211 static int32_t rack_min_to = 1000; /* Number of microsecond min timeout */
212 static int32_t rack_verbose_logging = 0;
213 static int32_t rack_ignore_data_after_close = 1;
214 static int32_t rack_enable_shared_cwnd = 1;
215 static int32_t rack_use_cmp_acks = 1;
216 static int32_t rack_use_fsb = 1;
217 static int32_t rack_use_rfo = 1;
218 static int32_t rack_use_rsm_rfo = 1;
219 static int32_t rack_max_abc_post_recovery = 2;
220 static int32_t rack_client_low_buf = 0;
221 static int32_t rack_dsack_std_based = 0x3; /* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */
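/*
 * For example: 0x1 sets only rc_rack_tmr_std_based, 0x2 sets only
 * rc_rack_use_dsack, and the default 0x3 sets both.
 */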
222 #ifdef TCP_ACCOUNTING
223 static int32_t rack_tcp_accounting = 0;
224 #endif
225 static int32_t rack_limits_scwnd = 1;
226 static int32_t rack_enable_mqueue_for_nonpaced = 0;
227 static int32_t rack_disable_prr = 0;
228 static int32_t use_rack_rr = 1;
229 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */
230 static int32_t rack_persist_min = 250000; /* 250 ms in usecs */
231 static int32_t rack_persist_max = 2000000; /* 2 seconds in usecs */
232 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */
233 static int32_t rack_default_init_window = 0; /* Use system default */
234 static int32_t rack_limit_time_with_srtt = 0;
235 static int32_t rack_autosndbuf_inc = 20; /* In percentage form */
236 static int32_t rack_enobuf_hw_boost_mult = 2; /* How many times the hw rate we boost slot using time_between */
237 static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */
238 static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */
239 static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */
241 /*
242 * Currently regular TCP has a rto_min of 30ms;
243 * the backoff goes 12 times, so that ends up
244 * being a total of 122.850 seconds before a
245 * connection is killed.
246 */
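/*
 * That figure follows from straight doubling across the 12 backoffs:
 *   30 ms * (2^0 + 2^1 + ... + 2^11) = 30 ms * 4095 = 122,850 ms ~= 122.85 s
 */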
247 static uint32_t rack_def_data_window = 20;
248 static uint32_t rack_goal_bdp = 2;
249 static uint32_t rack_min_srtts = 1;
250 static uint32_t rack_min_measure_usec = 0;
251 static int32_t rack_tlp_min = 10000; /* 10ms */
252 static int32_t rack_rto_min = 30000; /* 30,000 usec same as main freebsd */
253 static int32_t rack_rto_max = 4000000; /* 4 seconds in usec's */
254 static const int32_t rack_free_cache = 2;
255 static int32_t rack_hptsi_segments = 40;
256 static int32_t rack_rate_sample_method = USE_RTT_LOW;
257 static int32_t rack_pace_every_seg = 0;
258 static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */
259 static int32_t rack_slot_reduction = 4;
260 static int32_t rack_wma_divisor = 8; /* For WMA calculation */
261 static int32_t rack_cwnd_block_ends_measure = 0;
262 static int32_t rack_rwnd_block_ends_measure = 0;
263 static int32_t rack_def_profile = 0;
265 static int32_t rack_lower_cwnd_at_tlp = 0;
266 static int32_t rack_limited_retran = 0;
267 static int32_t rack_always_send_oldest = 0;
268 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;
270 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */
271 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */
272 static uint16_t rack_per_of_gp_rec = 200; /* 200 % of bw */
275 static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */
276 static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */
277 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */
278 static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */
279 static uint16_t rack_atexit_prtt = 130; /* Clamp to 130% on exit prtt if non highly buffered path */
281 static uint32_t rack_max_drain_wait = 2; /* How many gp srtt's before we give up draining */
282 static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */
283 static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */
284 static uint32_t rack_probertt_use_min_rtt_exit = 0;
285 static uint32_t rack_probe_rtt_sets_cwnd = 0;
286 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */
287 static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in usecs */
288 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top fraction */
289 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bottom fraction */
290 static uint32_t rack_min_probertt_hold = 40000; /* Equal to delayed ack time */
291 static uint32_t rack_probertt_filter_life = 10000000;
292 static uint32_t rack_probertt_lower_within = 10;
293 static uint32_t rack_min_rtt_movement = 250000; /* Must move at least 250ms (in microseconds) to count as a lowering */
294 static int32_t rack_pace_one_seg = 0; /* Shall we pace for less than 1.4Meg 1MSS at a time */
295 static int32_t rack_probertt_clear_is = 1;
296 static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */
297 static int32_t rack_hbp_thresh = 3; /* what is the divisor max_rtt/min_rtt to decide a hbp */
300 static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */
302 /* Timely information */
303 /* Combine these two gives the range of 'no change' to bw */
304 /* ie the up/down provide the upper and lower bound */
305 static int32_t rack_gp_per_bw_mul_up = 2; /* 2% */
306 static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */
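/*
 * With the defaults above (up = 2, down = 4), a new b/w measurement is
 * treated as "no change" when it lands roughly within -4% .. +2% of the
 * previous one; only moves outside that band adjust the multipliers.
 */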
307 static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */
308 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */
309 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */
310 static int32_t rack_gp_decrease_per = 20; /* 20% decrease in multiplier */
311 static int32_t rack_gp_increase_per = 2; /* 2% increase in multiplier */
312 static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */
313 static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */
314 static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */
315 static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */
316 static int32_t rack_gp_no_rec_chg = 1; /* Prohibit recovery from reducing its multiplier */
317 static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? */
318 static int32_t rack_timely_max_push_rise = 3; /* Three rounds of pushing */
319 static int32_t rack_timely_max_push_drop = 3; /* Three rounds of pushing */
320 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */
321 static int32_t rack_use_max_for_nobackoff = 0;
322 static int32_t rack_timely_int_timely_only = 0; /* do interim timely's only use the timely algo (no b/w changes)? */
323 static int32_t rack_timely_no_stopping = 0;
324 static int32_t rack_down_raise_thresh = 100;
325 static int32_t rack_req_segs = 1;
326 static uint64_t rack_bw_rate_cap = 0;
329 /* Weird delayed ack mode */
330 static int32_t rack_use_imac_dack = 0;
331 /* Rack specific counters */
332 counter_u64_t rack_saw_enobuf;
333 counter_u64_t rack_saw_enobuf_hw;
334 counter_u64_t rack_saw_enetunreach;
335 counter_u64_t rack_persists_sends;
336 counter_u64_t rack_persists_acks;
337 counter_u64_t rack_persists_loss;
338 counter_u64_t rack_persists_lost_ends;
340 counter_u64_t rack_adjust_map_bw;
342 /* Tail loss probe counters */
343 counter_u64_t rack_tlp_tot;
344 counter_u64_t rack_tlp_newdata;
345 counter_u64_t rack_tlp_retran;
346 counter_u64_t rack_tlp_retran_bytes;
347 counter_u64_t rack_to_tot;
348 counter_u64_t rack_hot_alloc;
349 counter_u64_t rack_to_alloc;
350 counter_u64_t rack_to_alloc_hard;
351 counter_u64_t rack_to_alloc_emerg;
352 counter_u64_t rack_to_alloc_limited;
353 counter_u64_t rack_alloc_limited_conns;
354 counter_u64_t rack_split_limited;
356 counter_u64_t rack_multi_single_eq;
357 counter_u64_t rack_proc_non_comp_ack;
359 counter_u64_t rack_fto_send;
360 counter_u64_t rack_fto_rsm_send;
361 counter_u64_t rack_nfto_resend;
362 counter_u64_t rack_non_fto_send;
363 counter_u64_t rack_extended_rfo;
365 counter_u64_t rack_sack_proc_all;
366 counter_u64_t rack_sack_proc_short;
367 counter_u64_t rack_sack_proc_restart;
368 counter_u64_t rack_sack_attacks_detected;
369 counter_u64_t rack_sack_attacks_reversed;
370 counter_u64_t rack_sack_used_next_merge;
371 counter_u64_t rack_sack_splits;
372 counter_u64_t rack_sack_used_prev_merge;
373 counter_u64_t rack_sack_skipped_acked;
374 counter_u64_t rack_ack_total;
375 counter_u64_t rack_express_sack;
376 counter_u64_t rack_sack_total;
377 counter_u64_t rack_move_none;
378 counter_u64_t rack_move_some;
380 counter_u64_t rack_input_idle_reduces;
381 counter_u64_t rack_collapsed_win;
382 counter_u64_t rack_collapsed_win_seen;
383 counter_u64_t rack_collapsed_win_rxt;
384 counter_u64_t rack_collapsed_win_rxt_bytes;
385 counter_u64_t rack_try_scwnd;
386 counter_u64_t rack_hw_pace_init_fail;
387 counter_u64_t rack_hw_pace_lost;
389 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
390 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];
393 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))
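/* i.e. the classic srtt + 4 * rttvar retransmit estimate, floored at rack_rto_min. */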
395 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \
396 	(tv) = (value) + slop; \
397 	if ((u_long)(tv) < (u_long)(tvmin)) \
398 		(tv) = (tvmin); \
399 	if ((u_long)(tv) > (u_long)(tvmax)) \
400 		(tv) = (tvmax); \
401 } while (0)
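/*
 * Typical (illustrative) use: clamp a computed RTO into
 * [rack_rto_min, rack_rto_max] after adding a slop term, e.g.
 *
 *	RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
 *	    rack_rto_min, rack_rto_max, slop);
 */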
404 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);
407 rack_process_ack(struct mbuf *m, struct tcphdr *th,
408 struct socket *so, struct tcpcb *tp, struct tcpopt *to,
409 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val);
411 rack_process_data(struct mbuf *m, struct tcphdr *th,
412 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
413 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
415 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
416 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery);
417 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
418 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
420 static struct rack_sendmap *
421 rack_check_recovery_mode(struct tcpcb *tp,
424 rack_cong_signal(struct tcpcb *tp,
425 uint32_t type, uint32_t ack, int );
426 static void rack_counter_destroy(void);
428 rack_ctloutput(struct inpcb *inp, struct sockopt *sopt);
429 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
431 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override);
433 rack_do_segment(struct mbuf *m, struct tcphdr *th,
434 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
436 static void rack_dtor(void *mem, int32_t size, void *arg);
438 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
439 uint32_t flex1, uint32_t flex2,
440 uint32_t flex3, uint32_t flex4,
441 uint32_t flex5, uint32_t flex6,
442 uint16_t flex7, uint8_t mod);
445 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
446 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line,
447 struct rack_sendmap *rsm, uint8_t quality);
448 static struct rack_sendmap *
449 rack_find_high_nonack(struct tcp_rack *rack,
450 struct rack_sendmap *rsm);
451 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
452 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
453 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
454 static int rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt);
456 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
457 tcp_seq th_ack, int line, uint8_t quality);
459 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
460 static int32_t rack_handoff_ok(struct tcpcb *tp);
461 static int32_t rack_init(struct tcpcb *tp);
462 static void rack_init_sysctls(void);
464 rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
465 struct tcphdr *th, int entered_rec, int dup_ack_struck);
467 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
468 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts,
469 struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls);
472 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
473 struct rack_sendmap *rsm);
474 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
475 static int32_t rack_output(struct tcpcb *tp);
478 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
479 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
480 uint32_t cts, int *moved_two);
481 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq);
482 static void rack_remxt_tmr(struct tcpcb *tp);
483 static int rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt);
484 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
485 static int32_t rack_stopall(struct tcpcb *tp);
486 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
488 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
489 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint16_t add_flag);
491 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
492 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag);
494 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
495 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
496 static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
498 rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
499 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
500 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
502 rack_do_closing(struct mbuf *m, struct tcphdr *th,
503 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
504 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
506 rack_do_established(struct mbuf *m, struct tcphdr *th,
507 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
508 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
510 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
511 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
512 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos);
514 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
515 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
516 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
518 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
519 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
520 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
522 rack_do_lastack(struct mbuf *m, struct tcphdr *th,
523 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
524 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
526 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
527 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
528 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
530 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
531 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
532 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
533 struct rack_sendmap *
534 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
536 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
537 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
539 tcp_rack_partialack(struct tcpcb *tp);
541 rack_set_profile(struct tcp_rack *rack, int prof);
543 rack_apply_deferred_options(struct tcp_rack *rack);
545 int32_t rack_clear_counter=0;
548 rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8)
551 struct cc_newreno_opts opt;
554 int error, failed = 0;
557 if (tp->t_cc == NULL) {
561 rack->rc_pacing_cc_set = 1;
562 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
563 /* Not new-reno we can't play games with beta! */
568 if (CC_ALGO(tp)->ctl_output == NULL) {
569 /* Huh, not using new-reno so no swaps? */
573 /* Get the current values out */
574 sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
575 sopt.sopt_dir = SOPT_GET;
576 opt.name = CC_NEWRENO_BETA;
577 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
583 opt.name = CC_NEWRENO_BETA_ECN;
584 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
589 old.beta_ecn = opt.val;
591 /* Now let's set in the values we have stored */
592 sopt.sopt_dir = SOPT_SET;
593 opt.name = CC_NEWRENO_BETA;
594 opt.val = rack->r_ctl.rc_saved_beta.beta;
595 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
600 opt.name = CC_NEWRENO_BETA_ECN;
601 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn;
602 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
607 /* Save off the values for restoral */
608 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
610 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
611 union tcp_log_stackspecific log;
615 ptr = ((struct newreno *)tp->t_ccv.cc_data);
616 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
617 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
618 log.u_bbr.flex1 = ptr->beta;
619 log.u_bbr.flex2 = ptr->beta_ecn;
620 log.u_bbr.flex3 = ptr->newreno_flags;
621 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
622 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
623 log.u_bbr.flex6 = failed;
624 log.u_bbr.flex7 = rack->gp_ready;
625 log.u_bbr.flex7 <<= 1;
626 log.u_bbr.flex7 |= rack->use_fixed_rate;
627 log.u_bbr.flex7 <<= 1;
628 log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
629 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
630 log.u_bbr.flex8 = flex8;
631 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, error,
632 0, &log, false, NULL, NULL, 0, &tv);
637 rack_set_cc_pacing(struct tcp_rack *rack)
639 if (rack->rc_pacing_cc_set)
642 * Use the swap utility, placing 3 in flex8 to id
643 * the setting of a new set of values.
645 rack->rc_pacing_cc_set = 1;
646 rack_swap_beta_values(rack, 3);
650 rack_undo_cc_pacing(struct tcp_rack *rack)
652 if (rack->rc_pacing_cc_set == 0)
655 * Use the swap utility, placing 4 in flex8 to id
656 * a restoration of the old values.
658 rack->rc_pacing_cc_set = 0;
659 rack_swap_beta_values(rack, 4);
662 #ifdef NETFLIX_PEAKRATE
664 rack_update_peakrate_thr(struct tcpcb *tp)
666 /* Keep in mind that t_maxpeakrate is in B/s. */
667 	uint64_t peak;
668 	peak = uqmax((tp->t_maxseg * 2),
669 (((uint64_t)tp->t_maxpeakrate * (uint64_t)(tp->t_srtt)) / (uint64_t)HPTS_USEC_IN_SEC));
670 tp->t_peakrate_thr = (uint32_t)uqmin(peak, UINT32_MAX);
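	/*
	 * i.e. t_peakrate_thr approximates the bytes the peak rate allows per
	 * srtt (B/s * usec / usec-per-sec), floored at two full-size segments
	 * and capped at UINT32_MAX.
	 */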
675 sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
680 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
681 if (error || req->newptr == NULL)
684 error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
689 printf("Clearing RACK counters\n");
691 counter_u64_zero(rack_tlp_tot);
692 counter_u64_zero(rack_tlp_newdata);
693 counter_u64_zero(rack_tlp_retran);
694 counter_u64_zero(rack_tlp_retran_bytes);
695 counter_u64_zero(rack_to_tot);
696 counter_u64_zero(rack_saw_enobuf);
697 counter_u64_zero(rack_saw_enobuf_hw);
698 counter_u64_zero(rack_saw_enetunreach);
699 counter_u64_zero(rack_persists_sends);
700 counter_u64_zero(rack_persists_acks);
701 counter_u64_zero(rack_persists_loss);
702 counter_u64_zero(rack_persists_lost_ends);
704 counter_u64_zero(rack_adjust_map_bw);
706 counter_u64_zero(rack_to_alloc_hard);
707 counter_u64_zero(rack_to_alloc_emerg);
708 counter_u64_zero(rack_sack_proc_all);
709 counter_u64_zero(rack_fto_send);
710 counter_u64_zero(rack_fto_rsm_send);
711 counter_u64_zero(rack_extended_rfo);
712 counter_u64_zero(rack_hw_pace_init_fail);
713 counter_u64_zero(rack_hw_pace_lost);
714 counter_u64_zero(rack_non_fto_send);
715 counter_u64_zero(rack_nfto_resend);
716 counter_u64_zero(rack_sack_proc_short);
717 counter_u64_zero(rack_sack_proc_restart);
718 counter_u64_zero(rack_to_alloc);
719 counter_u64_zero(rack_to_alloc_limited);
720 counter_u64_zero(rack_alloc_limited_conns);
721 counter_u64_zero(rack_split_limited);
722 counter_u64_zero(rack_multi_single_eq);
723 counter_u64_zero(rack_proc_non_comp_ack);
724 counter_u64_zero(rack_sack_attacks_detected);
725 counter_u64_zero(rack_sack_attacks_reversed);
726 counter_u64_zero(rack_sack_used_next_merge);
727 counter_u64_zero(rack_sack_used_prev_merge);
728 counter_u64_zero(rack_sack_splits);
729 counter_u64_zero(rack_sack_skipped_acked);
730 counter_u64_zero(rack_ack_total);
731 counter_u64_zero(rack_express_sack);
732 counter_u64_zero(rack_sack_total);
733 counter_u64_zero(rack_move_none);
734 counter_u64_zero(rack_move_some);
735 counter_u64_zero(rack_try_scwnd);
736 counter_u64_zero(rack_collapsed_win);
737 counter_u64_zero(rack_collapsed_win_rxt);
738 counter_u64_zero(rack_collapsed_win_seen);
739 counter_u64_zero(rack_collapsed_win_rxt_bytes);
741 rack_clear_counter = 0;
746 rack_init_sysctls(void)
748 struct sysctl_oid *rack_counters;
749 struct sysctl_oid *rack_attack;
750 struct sysctl_oid *rack_pacing;
751 struct sysctl_oid *rack_timely;
752 struct sysctl_oid *rack_timers;
753 struct sysctl_oid *rack_tlp;
754 struct sysctl_oid *rack_misc;
755 struct sysctl_oid *rack_features;
756 struct sysctl_oid *rack_measure;
757 struct sysctl_oid *rack_probertt;
758 struct sysctl_oid *rack_hw_pacing;
760 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
761 SYSCTL_CHILDREN(rack_sysctl_root),
764 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
765 "Rack Sack Attack Counters and Controls");
766 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
767 SYSCTL_CHILDREN(rack_sysctl_root),
770 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
772 SYSCTL_ADD_S32(&rack_sysctl_ctx,
773 SYSCTL_CHILDREN(rack_sysctl_root),
774 OID_AUTO, "rate_sample_method", CTLFLAG_RW,
775 &rack_rate_sample_method , USE_RTT_LOW,
776 "What method should we use for rate sampling 0=high, 1=low ");
777 /* Probe rtt related controls */
778 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
779 SYSCTL_CHILDREN(rack_sysctl_root),
782 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
783 "ProbeRTT related Controls");
784 SYSCTL_ADD_U16(&rack_sysctl_ctx,
785 SYSCTL_CHILDREN(rack_probertt),
786 OID_AUTO, "exit_per_hpb", CTLFLAG_RW,
787 &rack_atexit_prtt_hbp, 130,
788 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%");
789 SYSCTL_ADD_U16(&rack_sysctl_ctx,
790 SYSCTL_CHILDREN(rack_probertt),
791 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW,
792 &rack_atexit_prtt, 130,
793 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%");
794 SYSCTL_ADD_U16(&rack_sysctl_ctx,
795 SYSCTL_CHILDREN(rack_probertt),
796 OID_AUTO, "gp_per_mul", CTLFLAG_RW,
797 &rack_per_of_gp_probertt, 60,
798 "What percentage of goodput do we pace at in probertt");
799 SYSCTL_ADD_U16(&rack_sysctl_ctx,
800 SYSCTL_CHILDREN(rack_probertt),
801 OID_AUTO, "gp_per_reduce", CTLFLAG_RW,
802 &rack_per_of_gp_probertt_reduce, 10,
803 "What percentage of goodput do we reduce every gp_srtt");
804 SYSCTL_ADD_U16(&rack_sysctl_ctx,
805 SYSCTL_CHILDREN(rack_probertt),
806 OID_AUTO, "gp_per_low", CTLFLAG_RW,
807 &rack_per_of_gp_lowthresh, 40,
808 "What percentage of goodput do we allow the multiplier to fall to");
809 SYSCTL_ADD_U32(&rack_sysctl_ctx,
810 SYSCTL_CHILDREN(rack_probertt),
811 OID_AUTO, "time_between", CTLFLAG_RW,
812 & rack_time_between_probertt, 96000000,
813 "How many useconds between the lowest rtt falling must past before we enter probertt");
814 SYSCTL_ADD_U32(&rack_sysctl_ctx,
815 SYSCTL_CHILDREN(rack_probertt),
816 OID_AUTO, "safety", CTLFLAG_RW,
817 &rack_probe_rtt_safety_val, 2000000,
818 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)");
819 SYSCTL_ADD_U32(&rack_sysctl_ctx,
820 SYSCTL_CHILDREN(rack_probertt),
821 OID_AUTO, "sets_cwnd", CTLFLAG_RW,
822 &rack_probe_rtt_sets_cwnd, 0,
823 "Do we set the cwnd too (if always_lower is on)");
824 SYSCTL_ADD_U32(&rack_sysctl_ctx,
825 SYSCTL_CHILDREN(rack_probertt),
826 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW,
827 &rack_max_drain_wait, 2,
828 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal");
829 SYSCTL_ADD_U32(&rack_sysctl_ctx,
830 SYSCTL_CHILDREN(rack_probertt),
831 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW,
833 "We must drain this many gp_srtt's waiting for flight to reach goal");
834 SYSCTL_ADD_U32(&rack_sysctl_ctx,
835 SYSCTL_CHILDREN(rack_probertt),
836 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW,
837 &rack_probertt_use_min_rtt_entry, 1,
838 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry");
839 SYSCTL_ADD_U32(&rack_sysctl_ctx,
840 SYSCTL_CHILDREN(rack_probertt),
841 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW,
842 &rack_probertt_use_min_rtt_exit, 0,
843 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt");
844 SYSCTL_ADD_U32(&rack_sysctl_ctx,
845 SYSCTL_CHILDREN(rack_probertt),
846 OID_AUTO, "length_div", CTLFLAG_RW,
847 &rack_probertt_gpsrtt_cnt_div, 0,
848 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)");
849 SYSCTL_ADD_U32(&rack_sysctl_ctx,
850 SYSCTL_CHILDREN(rack_probertt),
851 OID_AUTO, "length_mul", CTLFLAG_RW,
852 &rack_probertt_gpsrtt_cnt_mul, 0,
853 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)");
854 SYSCTL_ADD_U32(&rack_sysctl_ctx,
855 SYSCTL_CHILDREN(rack_probertt),
856 OID_AUTO, "holdtim_at_target", CTLFLAG_RW,
857 &rack_min_probertt_hold, 200000,
858 "What is the minimum time we hold probertt at target");
859 SYSCTL_ADD_U32(&rack_sysctl_ctx,
860 SYSCTL_CHILDREN(rack_probertt),
861 OID_AUTO, "filter_life", CTLFLAG_RW,
862 &rack_probertt_filter_life, 10000000,
863 "What is the time for the filters life in useconds");
864 SYSCTL_ADD_U32(&rack_sysctl_ctx,
865 SYSCTL_CHILDREN(rack_probertt),
866 OID_AUTO, "lower_within", CTLFLAG_RW,
867 &rack_probertt_lower_within, 10,
868 "If the rtt goes lower within this percentage of the time, go into probe-rtt");
869 SYSCTL_ADD_U32(&rack_sysctl_ctx,
870 SYSCTL_CHILDREN(rack_probertt),
871 OID_AUTO, "must_move", CTLFLAG_RW,
872 &rack_min_rtt_movement, 250,
873 "How much is the minimum movement in rtt to count as a drop for probertt purposes");
874 SYSCTL_ADD_U32(&rack_sysctl_ctx,
875 SYSCTL_CHILDREN(rack_probertt),
876 OID_AUTO, "clear_is_cnts", CTLFLAG_RW,
877 &rack_probertt_clear_is, 1,
878 "Do we clear I/S counts on exiting probe-rtt");
879 SYSCTL_ADD_S32(&rack_sysctl_ctx,
880 SYSCTL_CHILDREN(rack_probertt),
881 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW,
882 &rack_max_drain_hbp, 1,
883 "How many extra drain gpsrtt's do we get in highly buffered paths");
884 SYSCTL_ADD_S32(&rack_sysctl_ctx,
885 SYSCTL_CHILDREN(rack_probertt),
886 OID_AUTO, "hbp_threshold", CTLFLAG_RW,
888 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold");
889 /* Pacing related sysctls */
890 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
891 SYSCTL_CHILDREN(rack_sysctl_root),
894 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
895 "Pacing related Controls");
896 SYSCTL_ADD_S32(&rack_sysctl_ctx,
897 SYSCTL_CHILDREN(rack_pacing),
898 OID_AUTO, "max_pace_over", CTLFLAG_RW,
899 &rack_max_per_above, 30,
900 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)");
901 SYSCTL_ADD_S32(&rack_sysctl_ctx,
902 SYSCTL_CHILDREN(rack_pacing),
903 OID_AUTO, "pace_to_one", CTLFLAG_RW,
904 &rack_pace_one_seg, 0,
905 "Do we allow low b/w pacing of 1MSS instead of two");
906 SYSCTL_ADD_S32(&rack_sysctl_ctx,
907 SYSCTL_CHILDREN(rack_pacing),
908 OID_AUTO, "limit_wsrtt", CTLFLAG_RW,
909 &rack_limit_time_with_srtt, 0,
910 "Do we limit pacing time based on srtt");
911 SYSCTL_ADD_S32(&rack_sysctl_ctx,
912 SYSCTL_CHILDREN(rack_pacing),
913 OID_AUTO, "init_win", CTLFLAG_RW,
914 &rack_default_init_window, 0,
915 "Do we have a rack initial window 0 = system default");
916 SYSCTL_ADD_U16(&rack_sysctl_ctx,
917 SYSCTL_CHILDREN(rack_pacing),
918 OID_AUTO, "gp_per_ss", CTLFLAG_RW,
919 &rack_per_of_gp_ss, 250,
920 "If non zero, what percentage of goodput to pace at in slow start");
921 SYSCTL_ADD_U16(&rack_sysctl_ctx,
922 SYSCTL_CHILDREN(rack_pacing),
923 OID_AUTO, "gp_per_ca", CTLFLAG_RW,
924 &rack_per_of_gp_ca, 150,
925 "If non zero, what percentage of goodput to pace at in congestion avoidance");
926 SYSCTL_ADD_U16(&rack_sysctl_ctx,
927 SYSCTL_CHILDREN(rack_pacing),
928 OID_AUTO, "gp_per_rec", CTLFLAG_RW,
929 &rack_per_of_gp_rec, 200,
930 "If non zero, what percentage of goodput to pace at in recovery");
931 SYSCTL_ADD_S32(&rack_sysctl_ctx,
932 SYSCTL_CHILDREN(rack_pacing),
933 OID_AUTO, "pace_max_seg", CTLFLAG_RW,
934 &rack_hptsi_segments, 40,
935 "What size is the max for TSO segments in pacing and burst mitigation");
936 SYSCTL_ADD_S32(&rack_sysctl_ctx,
937 SYSCTL_CHILDREN(rack_pacing),
938 OID_AUTO, "burst_reduces", CTLFLAG_RW,
939 &rack_slot_reduction, 4,
940 "When doing only burst mitigation what is the reduce divisor");
941 SYSCTL_ADD_S32(&rack_sysctl_ctx,
942 SYSCTL_CHILDREN(rack_sysctl_root),
943 OID_AUTO, "use_pacing", CTLFLAG_RW,
944 &rack_pace_every_seg, 0,
945 "If set we use pacing, if clear we use only the original burst mitigation");
946 SYSCTL_ADD_U64(&rack_sysctl_ctx,
947 SYSCTL_CHILDREN(rack_pacing),
948 OID_AUTO, "rate_cap", CTLFLAG_RW,
949 &rack_bw_rate_cap, 0,
950 "If set we apply this value to the absolute rate cap used by pacing");
951 SYSCTL_ADD_U8(&rack_sysctl_ctx,
952 SYSCTL_CHILDREN(rack_sysctl_root),
953 OID_AUTO, "req_measure_cnt", CTLFLAG_RW,
954 &rack_req_measurements, 1,
955 "If doing dynamic pacing, how many measurements must be in before we start pacing?");
956 /* Hardware pacing */
957 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
958 SYSCTL_CHILDREN(rack_sysctl_root),
961 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
962 "Pacing related Controls");
963 SYSCTL_ADD_S32(&rack_sysctl_ctx,
964 SYSCTL_CHILDREN(rack_hw_pacing),
965 OID_AUTO, "rwnd_factor", CTLFLAG_RW,
966 &rack_hw_rwnd_factor, 2,
967 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?");
968 SYSCTL_ADD_S32(&rack_sysctl_ctx,
969 SYSCTL_CHILDREN(rack_hw_pacing),
970 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW,
971 &rack_enobuf_hw_boost_mult, 2,
972 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?");
973 SYSCTL_ADD_S32(&rack_sysctl_ctx,
974 SYSCTL_CHILDREN(rack_hw_pacing),
975 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW,
976 &rack_enobuf_hw_max, 2,
977 "What is the max boost the pacing time if we see a ENOBUFS?");
978 SYSCTL_ADD_S32(&rack_sysctl_ctx,
979 SYSCTL_CHILDREN(rack_hw_pacing),
980 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW,
981 &rack_enobuf_hw_min, 2,
982 "What is the min boost the pacing time if we see a ENOBUFS?");
983 SYSCTL_ADD_S32(&rack_sysctl_ctx,
984 SYSCTL_CHILDREN(rack_hw_pacing),
985 OID_AUTO, "enable", CTLFLAG_RW,
986 &rack_enable_hw_pacing, 0,
987 "Should RACK attempt to use hw pacing?");
988 SYSCTL_ADD_S32(&rack_sysctl_ctx,
989 SYSCTL_CHILDREN(rack_hw_pacing),
990 OID_AUTO, "rate_cap", CTLFLAG_RW,
991 &rack_hw_rate_caps, 1,
992 "Does the highest hardware pacing rate cap the rate we will send at??");
993 SYSCTL_ADD_S32(&rack_sysctl_ctx,
994 SYSCTL_CHILDREN(rack_hw_pacing),
995 OID_AUTO, "rate_min", CTLFLAG_RW,
996 &rack_hw_rate_min, 0,
997 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?");
998 SYSCTL_ADD_S32(&rack_sysctl_ctx,
999 SYSCTL_CHILDREN(rack_hw_pacing),
1000 OID_AUTO, "rate_to_low", CTLFLAG_RW,
1001 &rack_hw_rate_to_low, 0,
1002 "If we fall below this rate, dis-engage hw pacing?");
1003 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1004 SYSCTL_CHILDREN(rack_hw_pacing),
1005 OID_AUTO, "up_only", CTLFLAG_RW,
1006 &rack_hw_up_only, 1,
1007 "Do we allow hw pacing to lower the rate selected?");
1008 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1009 SYSCTL_CHILDREN(rack_hw_pacing),
1010 OID_AUTO, "extra_mss_precise", CTLFLAG_RW,
1011 &rack_hw_pace_extra_slots, 2,
1012 "If the rates between software and hardware match precisely how many extra time_betweens do we get?");
1013 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1014 SYSCTL_CHILDREN(rack_sysctl_root),
1017 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1018 "Rack Timely RTT Controls");
1019 /* Timely based GP dynamics */
1020 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1021 SYSCTL_CHILDREN(rack_timely),
1022 OID_AUTO, "upper", CTLFLAG_RW,
1023 &rack_gp_per_bw_mul_up, 2,
1024 "Rack timely upper range for equal b/w (in percentage)");
1025 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1026 SYSCTL_CHILDREN(rack_timely),
1027 OID_AUTO, "lower", CTLFLAG_RW,
1028 &rack_gp_per_bw_mul_down, 4,
1029 "Rack timely lower range for equal b/w (in percentage)");
1030 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1031 SYSCTL_CHILDREN(rack_timely),
1032 OID_AUTO, "rtt_max_mul", CTLFLAG_RW,
1033 &rack_gp_rtt_maxmul, 3,
1034 "Rack timely multiplier of lowest rtt for rtt_max");
1035 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1036 SYSCTL_CHILDREN(rack_timely),
1037 OID_AUTO, "rtt_min_div", CTLFLAG_RW,
1038 &rack_gp_rtt_mindiv, 4,
1039 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt");
1040 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1041 SYSCTL_CHILDREN(rack_timely),
1042 OID_AUTO, "rtt_min_mul", CTLFLAG_RW,
1043 &rack_gp_rtt_minmul, 1,
1044 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
1045 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1046 SYSCTL_CHILDREN(rack_timely),
1047 OID_AUTO, "decrease", CTLFLAG_RW,
1048 &rack_gp_decrease_per, 20,
1049 "Rack timely decrease percentage of our GP multiplication factor");
1050 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1051 SYSCTL_CHILDREN(rack_timely),
1052 OID_AUTO, "increase", CTLFLAG_RW,
1053 &rack_gp_increase_per, 2,
1054 "Rack timely increase perentage of our GP multiplication factor");
1055 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1056 SYSCTL_CHILDREN(rack_timely),
1057 OID_AUTO, "lowerbound", CTLFLAG_RW,
1058 &rack_per_lower_bound, 50,
1059 "Rack timely lowest percentage we allow GP multiplier to fall to");
1060 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1061 SYSCTL_CHILDREN(rack_timely),
1062 OID_AUTO, "upperboundss", CTLFLAG_RW,
1063 &rack_per_upper_bound_ss, 0,
1064 "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)");
1065 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1066 SYSCTL_CHILDREN(rack_timely),
1067 OID_AUTO, "upperboundca", CTLFLAG_RW,
1068 &rack_per_upper_bound_ca, 0,
1069 "Rack timely highest percentage we allow GP multiplier to CA raise to (0 is no upperbound)");
1070 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1071 SYSCTL_CHILDREN(rack_timely),
1072 OID_AUTO, "dynamicgp", CTLFLAG_RW,
1073 &rack_do_dyn_mul, 0,
1074 "Rack timely do we enable dynmaic timely goodput by default");
1075 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1076 SYSCTL_CHILDREN(rack_timely),
1077 OID_AUTO, "no_rec_red", CTLFLAG_RW,
1078 &rack_gp_no_rec_chg, 1,
1079 "Rack timely do we prohibit the recovery multiplier from being lowered");
1080 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1081 SYSCTL_CHILDREN(rack_timely),
1082 OID_AUTO, "red_clear_cnt", CTLFLAG_RW,
1083 &rack_timely_dec_clear, 6,
1084 "Rack timely what threshold do we count to before another boost during b/w decent");
1085 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1086 SYSCTL_CHILDREN(rack_timely),
1087 OID_AUTO, "max_push_rise", CTLFLAG_RW,
1088 &rack_timely_max_push_rise, 3,
1089 "Rack timely how many times do we push up with b/w increase");
1090 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1091 SYSCTL_CHILDREN(rack_timely),
1092 OID_AUTO, "max_push_drop", CTLFLAG_RW,
1093 &rack_timely_max_push_drop, 3,
1094 "Rack timely how many times do we push back on b/w decent");
1095 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1096 SYSCTL_CHILDREN(rack_timely),
1097 OID_AUTO, "min_segs", CTLFLAG_RW,
1098 &rack_timely_min_segs, 4,
1099 "Rack timely when setting the cwnd what is the min num segments");
1100 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1101 SYSCTL_CHILDREN(rack_timely),
1102 OID_AUTO, "noback_max", CTLFLAG_RW,
1103 &rack_use_max_for_nobackoff, 0,
1104 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min");
1105 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1106 SYSCTL_CHILDREN(rack_timely),
1107 OID_AUTO, "interim_timely_only", CTLFLAG_RW,
1108 &rack_timely_int_timely_only, 0,
1109 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)");
1110 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1111 SYSCTL_CHILDREN(rack_timely),
1112 OID_AUTO, "nonstop", CTLFLAG_RW,
1113 &rack_timely_no_stopping, 0,
1114 "Rack timely don't stop increase");
1115 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1116 SYSCTL_CHILDREN(rack_timely),
1117 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW,
1118 &rack_down_raise_thresh, 100,
1119 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)");
1120 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1121 SYSCTL_CHILDREN(rack_timely),
1122 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW,
1124 "Bottom dragging if not these many segments outstanding and room");
1126 /* TLP and Rack related parameters */
1127 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1128 SYSCTL_CHILDREN(rack_sysctl_root),
1131 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1132 "TLP and Rack related Controls");
1133 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1134 SYSCTL_CHILDREN(rack_tlp),
1135 OID_AUTO, "use_rrr", CTLFLAG_RW,
1137 "Do we use Rack Rapid Recovery");
1138 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1139 SYSCTL_CHILDREN(rack_tlp),
1140 OID_AUTO, "post_rec_labc", CTLFLAG_RW,
1141 &rack_max_abc_post_recovery, 2,
1142 "Since we do early recovery, do we override the l_abc to a value, if so what?");
1143 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1144 SYSCTL_CHILDREN(rack_tlp),
1145 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW,
1146 &rack_non_rxt_use_cr, 0,
1147 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk");
1148 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1149 SYSCTL_CHILDREN(rack_tlp),
1150 OID_AUTO, "tlpmethod", CTLFLAG_RW,
1151 &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
1152 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
1153 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1154 SYSCTL_CHILDREN(rack_tlp),
1155 OID_AUTO, "limit", CTLFLAG_RW,
1157 "How many TLP's can be sent without sending new data");
1158 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1159 SYSCTL_CHILDREN(rack_tlp),
1160 OID_AUTO, "use_greater", CTLFLAG_RW,
1161 &rack_tlp_use_greater, 1,
1162 "Should we use the rack_rtt time if its greater than srtt");
1163 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1164 SYSCTL_CHILDREN(rack_tlp),
1165 OID_AUTO, "tlpminto", CTLFLAG_RW,
1166 &rack_tlp_min, 10000,
1167 "TLP minimum timeout per the specification (in microseconds)");
1168 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1169 SYSCTL_CHILDREN(rack_tlp),
1170 OID_AUTO, "send_oldest", CTLFLAG_RW,
1171 &rack_always_send_oldest, 0,
1172 "Should we always send the oldest TLP and RACK-TLP");
1173 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1174 SYSCTL_CHILDREN(rack_tlp),
1175 OID_AUTO, "rack_tlimit", CTLFLAG_RW,
1176 &rack_limited_retran, 0,
1177 "How many times can a rack timeout drive out sends");
1178 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1179 SYSCTL_CHILDREN(rack_tlp),
1180 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
1181 &rack_lower_cwnd_at_tlp, 0,
1182 "When a TLP completes a retran should we enter recovery");
1183 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1184 SYSCTL_CHILDREN(rack_tlp),
1185 OID_AUTO, "reorder_thresh", CTLFLAG_RW,
1186 &rack_reorder_thresh, 2,
1187 "What factor for rack will be added when seeing reordering (shift right)");
1188 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1189 SYSCTL_CHILDREN(rack_tlp),
1190 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
1191 &rack_tlp_thresh, 1,
1192 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
1193 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1194 SYSCTL_CHILDREN(rack_tlp),
1195 OID_AUTO, "reorder_fade", CTLFLAG_RW,
1196 &rack_reorder_fade, 60000000,
1197 "Does reorder detection fade, if so how many microseconds (0 means never)");
1198 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1199 SYSCTL_CHILDREN(rack_tlp),
1200 OID_AUTO, "pktdelay", CTLFLAG_RW,
1201 &rack_pkt_delay, 1000,
1202 "Extra RACK time (in microseconds) besides reordering thresh");
1204 /* Timer related controls */
1205 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1206 SYSCTL_CHILDREN(rack_sysctl_root),
1209 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1210 "Timer related controls");
1211 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1212 SYSCTL_CHILDREN(rack_timers),
1213 OID_AUTO, "persmin", CTLFLAG_RW,
1214 &rack_persist_min, 250000,
1215 "What is the minimum time in microseconds between persists");
1216 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1217 SYSCTL_CHILDREN(rack_timers),
1218 OID_AUTO, "persmax", CTLFLAG_RW,
1219 &rack_persist_max, 2000000,
1220 "What is the largest delay in microseconds between persists");
1221 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1222 SYSCTL_CHILDREN(rack_timers),
1223 OID_AUTO, "delayed_ack", CTLFLAG_RW,
1224 &rack_delayed_ack_time, 40000,
1225 "Delayed ack time (40ms in microseconds)");
1226 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1227 SYSCTL_CHILDREN(rack_timers),
1228 OID_AUTO, "minrto", CTLFLAG_RW,
1229 &rack_rto_min, 30000,
1230 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP");
1231 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1232 SYSCTL_CHILDREN(rack_timers),
1233 OID_AUTO, "maxrto", CTLFLAG_RW,
1234 &rack_rto_max, 4000000,
1235 "Maximum RTO in microseconds -- should be at least as large as min_rto");
1236 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1237 SYSCTL_CHILDREN(rack_timers),
1238 OID_AUTO, "minto", CTLFLAG_RW,
1240 "Minimum rack timeout in microseconds");
1241 /* Measure controls */
1242 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1243 SYSCTL_CHILDREN(rack_sysctl_root),
1246 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1247 "Measure related controls");
1248 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1249 SYSCTL_CHILDREN(rack_measure),
1250 OID_AUTO, "wma_divisor", CTLFLAG_RW,
1251 &rack_wma_divisor, 8,
1252 "When doing b/w calculation what is the divisor for the WMA");
1253 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1254 SYSCTL_CHILDREN(rack_measure),
1255 OID_AUTO, "end_cwnd", CTLFLAG_RW,
1256 &rack_cwnd_block_ends_measure, 0,
1257 "Does a cwnd just-return end the measurement window (app limited)");
1258 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1259 SYSCTL_CHILDREN(rack_measure),
1260 OID_AUTO, "end_rwnd", CTLFLAG_RW,
1261 &rack_rwnd_block_ends_measure, 0,
1262 "Does an rwnd just-return end the measurement window (app limited -- not persists)");
1263 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1264 SYSCTL_CHILDREN(rack_measure),
1265 OID_AUTO, "min_target", CTLFLAG_RW,
1266 &rack_def_data_window, 20,
1267 "What is the minimum target window (in mss) for a GP measurements");
1268 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1269 SYSCTL_CHILDREN(rack_measure),
1270 OID_AUTO, "goal_bdp", CTLFLAG_RW,
1272 "What is the goal BDP to measure");
1273 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1274 SYSCTL_CHILDREN(rack_measure),
1275 OID_AUTO, "min_srtts", CTLFLAG_RW,
1277 "What is the goal BDP to measure");
1278 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1279 SYSCTL_CHILDREN(rack_measure),
1280 OID_AUTO, "min_measure_tim", CTLFLAG_RW,
1281 &rack_min_measure_usec, 0,
1282 "What is the Minimum time time for a measurement if 0, this is off");
1284 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1285 SYSCTL_CHILDREN(rack_sysctl_root),
1288 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1289 "Feature controls");
1290 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1291 SYSCTL_CHILDREN(rack_features),
1292 OID_AUTO, "cmpack", CTLFLAG_RW,
1293 &rack_use_cmp_acks, 1,
1294 "Should RACK have LRO send compressed acks");
1295 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1296 SYSCTL_CHILDREN(rack_features),
1297 OID_AUTO, "fsb", CTLFLAG_RW,
1299 "Should RACK use the fast send block?");
1300 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1301 SYSCTL_CHILDREN(rack_features),
1302 OID_AUTO, "rfo", CTLFLAG_RW,
1304 "Should RACK use rack_fast_output()?");
1305 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1306 SYSCTL_CHILDREN(rack_features),
1307 OID_AUTO, "rsmrfo", CTLFLAG_RW,
1308 &rack_use_rsm_rfo, 1,
1309 "Should RACK use rack_fast_rsm_output()?");
1310 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1311 SYSCTL_CHILDREN(rack_features),
1312 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW,
1313 &rack_enable_mqueue_for_nonpaced, 0,
1314 "Should RACK use mbuf queuing for non-paced connections");
1315 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1316 SYSCTL_CHILDREN(rack_features),
1317 OID_AUTO, "hystartplusplus", CTLFLAG_RW,
1318 &rack_do_hystart, 0,
1319 "Should RACK enable HyStart++ on connections?");
1320 /* Misc rack controls */
1321 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1322 SYSCTL_CHILDREN(rack_sysctl_root),
1325 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1326 "Misc related controls");
1327 #ifdef TCP_ACCOUNTING
1328 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1329 SYSCTL_CHILDREN(rack_misc),
1330 OID_AUTO, "tcp_acct", CTLFLAG_RW,
1331 &rack_tcp_accounting, 0,
1332 "Should we turn on TCP accounting for all rack sessions?");
1334 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1335 SYSCTL_CHILDREN(rack_misc),
1336 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW,
1337 &rack_apply_rtt_with_reduced_conf, 0,
1338 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?");
1339 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1340 SYSCTL_CHILDREN(rack_misc),
1341 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW,
1342 &rack_dsack_std_based, 3,
1343 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?");
1344 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1345 SYSCTL_CHILDREN(rack_misc),
1346 OID_AUTO, "prr_addback_max", CTLFLAG_RW,
1347 &rack_prr_addbackmax, 2,
1348 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?");
1349 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1350 SYSCTL_CHILDREN(rack_misc),
1351 OID_AUTO, "stats_gets_ms", CTLFLAG_RW,
1352 &rack_stats_gets_ms_rtt, 1,
1353 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?");
1354 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1355 SYSCTL_CHILDREN(rack_misc),
1356 OID_AUTO, "clientlowbuf", CTLFLAG_RW,
1357 &rack_client_low_buf, 0,
1358 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?");
1359 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1360 SYSCTL_CHILDREN(rack_misc),
1361 OID_AUTO, "defprofile", CTLFLAG_RW,
1362 &rack_def_profile, 0,
1363 "Should RACK use a default profile (0=no, num == profile num)?");
1364 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1365 SYSCTL_CHILDREN(rack_misc),
1366 OID_AUTO, "shared_cwnd", CTLFLAG_RW,
1367 &rack_enable_shared_cwnd, 1,
1368 "Should RACK try to use the shared cwnd on connections where allowed");
1369 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1370 SYSCTL_CHILDREN(rack_misc),
1371 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW,
1372 &rack_limits_scwnd, 1,
1373 "Should RACK place low end time limits on the shared cwnd feature");
1374 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1375 SYSCTL_CHILDREN(rack_misc),
1376 OID_AUTO, "iMac_dack", CTLFLAG_RW,
1377 &rack_use_imac_dack, 0,
1378 "Should RACK try to emulate iMac delayed ack");
1379 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1380 SYSCTL_CHILDREN(rack_misc),
1381 OID_AUTO, "no_prr", CTLFLAG_RW,
1382 &rack_disable_prr, 0,
1383 "Should RACK not use prr and only pace (must have pacing on)");
1384 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1385 SYSCTL_CHILDREN(rack_misc),
1386 OID_AUTO, "bb_verbose", CTLFLAG_RW,
1387 &rack_verbose_logging, 0,
1388 "Should RACK black box logging be verbose");
1389 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1390 SYSCTL_CHILDREN(rack_misc),
1391 OID_AUTO, "data_after_close", CTLFLAG_RW,
1392 &rack_ignore_data_after_close, 1,
1393 "Do we hold off sending a RST until all pending data is ack'd");
1394 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1395 SYSCTL_CHILDREN(rack_misc),
1396 OID_AUTO, "no_sack_needed", CTLFLAG_RW,
1397 &rack_sack_not_required, 1,
1398 "Do we allow rack to run on connections not supporting SACK");
1399 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1400 SYSCTL_CHILDREN(rack_misc),
1401 OID_AUTO, "prr_sendalot", CTLFLAG_RW,
1402 &rack_send_a_lot_in_prr, 1,
1403 "Send a lot in prr");
1404 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1405 SYSCTL_CHILDREN(rack_misc),
1406 OID_AUTO, "autoscale", CTLFLAG_RW,
1407 &rack_autosndbuf_inc, 20,
1408 "What percentage should rack scale up its snd buffer by?");
1409 /* Sack Attacker detection stuff */
1410 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1411 SYSCTL_CHILDREN(rack_attack),
1412 OID_AUTO, "detect_highsackratio", CTLFLAG_RW,
1413 &rack_highest_sack_thresh_seen, 0,
1414 "Highest sack to ack ratio seen");
1415 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1416 SYSCTL_CHILDREN(rack_attack),
1417 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW,
1418 &rack_highest_move_thresh_seen, 0,
1419 "Highest move to non-move ratio seen");
1420 rack_ack_total = counter_u64_alloc(M_WAITOK);
1421 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1422 SYSCTL_CHILDREN(rack_attack),
1423 OID_AUTO, "acktotal", CTLFLAG_RD,
1425 "Total number of Ack's");
1426 rack_express_sack = counter_u64_alloc(M_WAITOK);
1427 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1428 SYSCTL_CHILDREN(rack_attack),
1429 OID_AUTO, "exp_sacktotal", CTLFLAG_RD,
1431 "Total expresss number of Sack's");
1432 rack_sack_total = counter_u64_alloc(M_WAITOK);
1433 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1434 SYSCTL_CHILDREN(rack_attack),
1435 OID_AUTO, "sacktotal", CTLFLAG_RD,
1437 "Total number of SACKs");
1438 rack_move_none = counter_u64_alloc(M_WAITOK);
1439 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1440 SYSCTL_CHILDREN(rack_attack),
1441 OID_AUTO, "move_none", CTLFLAG_RD,
1443 "Total number of SACK index reuse of positions under threshold");
1444 rack_move_some = counter_u64_alloc(M_WAITOK);
1445 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1446 SYSCTL_CHILDREN(rack_attack),
1447 OID_AUTO, "move_some", CTLFLAG_RD,
1449 "Total number of SACK index reuse of positions over threshold");
1450 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK);
1451 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1452 SYSCTL_CHILDREN(rack_attack),
1453 OID_AUTO, "attacks", CTLFLAG_RD,
1454 &rack_sack_attacks_detected,
1455 "Total number of SACK attackers that had sack disabled");
1456 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK);
1457 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1458 SYSCTL_CHILDREN(rack_attack),
1459 OID_AUTO, "reversed", CTLFLAG_RD,
1460 &rack_sack_attacks_reversed,
1461 "Total number of SACK attackers that were later determined false positive");
1462 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK);
1463 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1464 SYSCTL_CHILDREN(rack_attack),
1465 OID_AUTO, "nextmerge", CTLFLAG_RD,
1466 &rack_sack_used_next_merge,
1467 "Total number of times we used the next merge");
1468 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK);
1469 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1470 SYSCTL_CHILDREN(rack_attack),
1471 OID_AUTO, "prevmerge", CTLFLAG_RD,
1472 &rack_sack_used_prev_merge,
1473 "Total number of times we used the prev merge");
1475 rack_fto_send = counter_u64_alloc(M_WAITOK);
1476 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1477 SYSCTL_CHILDREN(rack_counters),
1478 OID_AUTO, "fto_send", CTLFLAG_RD,
1479 &rack_fto_send, "Total number of rack_fast_output sends");
1480 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK);
1481 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1482 SYSCTL_CHILDREN(rack_counters),
1483 OID_AUTO, "fto_rsm_send", CTLFLAG_RD,
1484 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends");
1485 rack_nfto_resend = counter_u64_alloc(M_WAITOK);
1486 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1487 SYSCTL_CHILDREN(rack_counters),
1488 OID_AUTO, "nfto_resend", CTLFLAG_RD,
1489 &rack_nfto_resend, "Total number of rack_output retransmissions");
1490 rack_non_fto_send = counter_u64_alloc(M_WAITOK);
1491 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1492 SYSCTL_CHILDREN(rack_counters),
1493 OID_AUTO, "nfto_send", CTLFLAG_RD,
1494 &rack_non_fto_send, "Total number of rack_output first sends");
1495 rack_extended_rfo = counter_u64_alloc(M_WAITOK);
1496 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1497 SYSCTL_CHILDREN(rack_counters),
1498 OID_AUTO, "rfo_extended", CTLFLAG_RD,
1499 &rack_extended_rfo, "Total number of times we extended rfo");
1501 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK);
1502 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1503 SYSCTL_CHILDREN(rack_counters),
1504 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD,
1505 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing");
1506 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK);
1508 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1509 SYSCTL_CHILDREN(rack_counters),
1510 OID_AUTO, "hwpace_lost", CTLFLAG_RD,
1511 &rack_hw_pace_lost, "Total number of times we lost hw pacing after it was set up");
1512 rack_tlp_tot = counter_u64_alloc(M_WAITOK);
1513 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1514 SYSCTL_CHILDREN(rack_counters),
1515 OID_AUTO, "tlp_to_total", CTLFLAG_RD,
1517 "Total number of tail loss probe expirations");
1518 rack_tlp_newdata = counter_u64_alloc(M_WAITOK);
1519 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1520 SYSCTL_CHILDREN(rack_counters),
1521 OID_AUTO, "tlp_new", CTLFLAG_RD,
1523 "Total number of tail loss probe sending new data");
1524 rack_tlp_retran = counter_u64_alloc(M_WAITOK);
1525 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1526 SYSCTL_CHILDREN(rack_counters),
1527 OID_AUTO, "tlp_retran", CTLFLAG_RD,
1529 "Total number of tail loss probe sending retransmitted data");
1530 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK);
1531 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1532 SYSCTL_CHILDREN(rack_counters),
1533 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD,
1534 &rack_tlp_retran_bytes,
1535 "Total bytes of tail loss probe sending retransmitted data");
1536 rack_to_tot = counter_u64_alloc(M_WAITOK);
1537 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1538 SYSCTL_CHILDREN(rack_counters),
1539 OID_AUTO, "rack_to_tot", CTLFLAG_RD,
1541 "Total number of times the rack to expired");
1542 rack_saw_enobuf = counter_u64_alloc(M_WAITOK);
1543 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1544 SYSCTL_CHILDREN(rack_counters),
1545 OID_AUTO, "saw_enobufs", CTLFLAG_RD,
1547 "Total number of times a sends returned enobuf for non-hdwr paced connections");
1548 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK);
1549 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1550 SYSCTL_CHILDREN(rack_counters),
1551 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD,
1552 &rack_saw_enobuf_hw,
1553 "Total number of times a send returned enobuf for hdwr paced connections");
1554 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK);
1555 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1556 SYSCTL_CHILDREN(rack_counters),
1557 OID_AUTO, "saw_enetunreach", CTLFLAG_RD,
1558 &rack_saw_enetunreach,
1559 "Total number of times a send received a enetunreachable");
1560 rack_hot_alloc = counter_u64_alloc(M_WAITOK);
1561 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1562 SYSCTL_CHILDREN(rack_counters),
1563 OID_AUTO, "alloc_hot", CTLFLAG_RD,
1565 "Total allocations from the top of our list");
1566 rack_to_alloc = counter_u64_alloc(M_WAITOK);
1567 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1568 SYSCTL_CHILDREN(rack_counters),
1569 OID_AUTO, "allocs", CTLFLAG_RD,
1571 "Total allocations of tracking structures");
1572 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK);
1573 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1574 SYSCTL_CHILDREN(rack_counters),
1575 OID_AUTO, "allochard", CTLFLAG_RD,
1576 &rack_to_alloc_hard,
1577 "Total allocations done with sleeping the hard way");
1578 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK);
1579 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1580 SYSCTL_CHILDREN(rack_counters),
1581 OID_AUTO, "allocemerg", CTLFLAG_RD,
1582 &rack_to_alloc_emerg,
1583 "Total allocations done from emergency cache");
1584 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK);
1585 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1586 SYSCTL_CHILDREN(rack_counters),
1587 OID_AUTO, "alloc_limited", CTLFLAG_RD,
1588 &rack_to_alloc_limited,
1589 "Total allocations dropped due to limit");
1590 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK);
1591 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1592 SYSCTL_CHILDREN(rack_counters),
1593 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD,
1594 &rack_alloc_limited_conns,
1595 "Connections with allocations dropped due to limit");
1596 rack_split_limited = counter_u64_alloc(M_WAITOK);
1597 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1598 SYSCTL_CHILDREN(rack_counters),
1599 OID_AUTO, "split_limited", CTLFLAG_RD,
1600 &rack_split_limited,
1601 "Split allocations dropped due to limit");
1602 rack_persists_sends = counter_u64_alloc(M_WAITOK);
1603 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1604 SYSCTL_CHILDREN(rack_counters),
1605 OID_AUTO, "persist_sends", CTLFLAG_RD,
1606 &rack_persists_sends,
1607 "Number of times we sent a persist probe");
1608 rack_persists_acks = counter_u64_alloc(M_WAITOK);
1609 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1610 SYSCTL_CHILDREN(rack_counters),
1611 OID_AUTO, "persist_acks", CTLFLAG_RD,
1612 &rack_persists_acks,
1613 "Number of times a persist probe was acked");
1614 rack_persists_loss = counter_u64_alloc(M_WAITOK);
1615 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1616 SYSCTL_CHILDREN(rack_counters),
1617 OID_AUTO, "persist_loss", CTLFLAG_RD,
1618 &rack_persists_loss,
1619 "Number of times we detected a lost persist probe (no ack)");
1620 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK);
1621 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1622 SYSCTL_CHILDREN(rack_counters),
1623 OID_AUTO, "persist_loss_ends", CTLFLAG_RD,
1624 &rack_persists_lost_ends,
1625 "Number of lost persist probe (no ack) that the run ended with a PERSIST abort");
1627 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK);
1628 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1629 SYSCTL_CHILDREN(rack_counters),
1630 OID_AUTO, "map_adjust_req", CTLFLAG_RD,
1631 &rack_adjust_map_bw,
1632 "Number of times we hit the case where the sb went up and down on a sendmap entry");
1634 rack_multi_single_eq = counter_u64_alloc(M_WAITOK);
1635 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1636 SYSCTL_CHILDREN(rack_counters),
1637 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD,
1638 &rack_multi_single_eq,
1639 "Number of compressed acks total represented");
1640 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK);
1641 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1642 SYSCTL_CHILDREN(rack_counters),
1643 OID_AUTO, "cmp_ack_not", CTLFLAG_RD,
1644 &rack_proc_non_comp_ack,
1645 "Number of non compresseds acks that we processed");
1648 rack_sack_proc_all = counter_u64_alloc(M_WAITOK);
1649 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1650 SYSCTL_CHILDREN(rack_counters),
1651 OID_AUTO, "sack_long", CTLFLAG_RD,
1652 &rack_sack_proc_all,
1653 "Total times we had to walk whole list for sack processing");
1654 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK);
1655 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1656 SYSCTL_CHILDREN(rack_counters),
1657 OID_AUTO, "sack_restart", CTLFLAG_RD,
1658 &rack_sack_proc_restart,
1659 "Total times we had to walk whole list due to a restart");
1660 rack_sack_proc_short = counter_u64_alloc(M_WAITOK);
1661 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1662 SYSCTL_CHILDREN(rack_counters),
1663 OID_AUTO, "sack_short", CTLFLAG_RD,
1664 &rack_sack_proc_short,
1665 "Total times we took shortcut for sack processing");
1666 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK);
1667 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1668 SYSCTL_CHILDREN(rack_attack),
1669 OID_AUTO, "skipacked", CTLFLAG_RD,
1670 &rack_sack_skipped_acked,
1671 "Total number of times we skipped previously sacked");
1672 rack_sack_splits = counter_u64_alloc(M_WAITOK);
1673 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1674 SYSCTL_CHILDREN(rack_attack),
1675 OID_AUTO, "ofsplit", CTLFLAG_RD,
1677 "Total number of times we did the old fashion tree split");
1678 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK);
1679 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1680 SYSCTL_CHILDREN(rack_counters),
1681 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD,
1682 &rack_input_idle_reduces,
1683 "Total number of idle reductions on input");
1684 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK);
1685 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1686 SYSCTL_CHILDREN(rack_counters),
1687 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD,
1688 &rack_collapsed_win_seen,
1689 "Total number of collapsed window events seen (where our window shrinks)");
1691 rack_collapsed_win = counter_u64_alloc(M_WAITOK);
1692 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1693 SYSCTL_CHILDREN(rack_counters),
1694 OID_AUTO, "collapsed_win", CTLFLAG_RD,
1695 &rack_collapsed_win,
1696 "Total number of collapsed window events where we mark packets");
1697 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK);
1698 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1699 SYSCTL_CHILDREN(rack_counters),
1700 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD,
1701 &rack_collapsed_win_rxt,
1702 "Total number of packets that were retransmitted");
1703 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK);
1704 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1705 SYSCTL_CHILDREN(rack_counters),
1706 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD,
1707 &rack_collapsed_win_rxt_bytes,
1708 "Total number of bytes that were retransmitted");
1709 rack_try_scwnd = counter_u64_alloc(M_WAITOK);
1710 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1711 SYSCTL_CHILDREN(rack_counters),
1712 OID_AUTO, "tried_scwnd", CTLFLAG_RD,
1714 "Total number of scwnd attempts");
1715 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK);
1716 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1717 OID_AUTO, "outsize", CTLFLAG_RD,
1718 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes");
1719 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK);
1720 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1721 OID_AUTO, "opts", CTLFLAG_RD,
1722 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats");
1723 SYSCTL_ADD_PROC(&rack_sysctl_ctx,
1724 SYSCTL_CHILDREN(rack_sysctl_root),
1725 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1726 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters");
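/*
 * Editor's note (illustrative, not part of the original source): the OIDs
 * registered above hang off rack_sysctl_root, which is created elsewhere in
 * this file.  Assuming that root resolves to something like
 * net.inet.tcp.<rack-root>, an administrator could, for example, read the
 * fast-output counter or clear all of the counters with:
 *
 *   sysctl net.inet.tcp.<rack-root>.counters.fto_send
 *   sysctl net.inet.tcp.<rack-root>.clear=1
 *
 * The exact prefix depends on how rack_sysctl_root was registered.
 */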
1730 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a)
1732 if (SEQ_GEQ(b->r_start, a->r_start) &&
1733 SEQ_LT(b->r_start, a->r_end)) {
1735 * The entry b is within the
1737 * a -- |-------------|
1742 * b -- |-----------|
1745 } else if (SEQ_GEQ(b->r_start, a->r_end)) {
1747 * b falls at or beyond the end of a,
1748 * so a is said to be
1749 * smaller than b.
1759 * What's left is where a is
1760 * larger than b. i.e:
1764 * b -- |--------------|
1769 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);
1770 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);
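/*
 * Editor's sketch (not in the original): per the comments in rb_map_cmp(),
 * an entry b whose r_start lands inside a compares equal (an RB_FIND hit),
 * an entry starting at or beyond a->r_end compares greater, and anything
 * else compares smaller.  For example, with a covering [100, 200):
 *
 *   b->r_start == 150  ->  0        (b is within a, RB_FIND returns a)
 *   b->r_start == 250  ->  positive (b sorts after a)
 *   b->r_start ==  50  ->  negative (b sorts before a)
 */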
1773 rc_init_window(struct tcp_rack *rack)
1777 if (rack->rc_init_win == 0) {
1779 * Nothing set by the user, use the system stack
1782 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)));
1784 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win;
1789 rack_get_fixed_pacing_bw(struct tcp_rack *rack)
1791 if (IN_FASTRECOVERY(rack->rc_tp->t_flags))
1792 return (rack->r_ctl.rc_fixed_pacing_rate_rec);
1793 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
1794 return (rack->r_ctl.rc_fixed_pacing_rate_ss);
1796 return (rack->r_ctl.rc_fixed_pacing_rate_ca);
1800 rack_get_bw(struct tcp_rack *rack)
1802 if (rack->use_fixed_rate) {
1803 /* Return the fixed pacing rate */
1804 return (rack_get_fixed_pacing_bw(rack));
1806 if (rack->r_ctl.gp_bw == 0) {
1808 * We do not yet have a b/w measurement;
1809 * if we have a user-set initial bw,
1810 * return it. If we don't have that and
1811 * we have an srtt, use the tcp IW (10) to
1812 * calculate a fictional b/w over the SRTT
1813 * which is more or less a guess. Note
1814 * we don't use our IW from rack on purpose
1815 * so if we have like IW=30, we are not
1816 * calculating a "huge" b/w.
1819 if (rack->r_ctl.init_rate)
1820 return (rack->r_ctl.init_rate);
1822 /* Has the user set a max peak rate? */
1823 #ifdef NETFLIX_PEAKRATE
1824 if (rack->rc_tp->t_maxpeakrate)
1825 return (rack->rc_tp->t_maxpeakrate);
1827 /* OK, let's come up with the IW guess, if we have an srtt */
1828 if (rack->rc_tp->t_srtt == 0) {
1830 * Go with old pacing method
1831 * i.e. burst mitigation only.
1835 /* OK, let's get the initial TCP win (not rack's) */
1836 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp));
1837 srtt = (uint64_t)rack->rc_tp->t_srtt;
1838 bw *= (uint64_t)USECS_IN_SECOND;
1839 bw /= srtt;
1840 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap))
1841 bw = rack->r_ctl.bw_rate_cap;
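/*
 * Editor's note: a worked example of the IW-based guess above (values are
 * illustrative only).  With no gp_bw and no user init_rate, an MSS of 1448
 * gives an initial window of roughly 10 * 1448 = 14480 bytes from
 * tcp_compute_initwnd().  With t_srtt == 20000 usec the fictional rate is
 *
 *   bw = 14480 * USECS_IN_SECOND / 20000 = 724000 bytes/sec (~5.8 Mbps)
 *
 * which is then clipped by bw_rate_cap (and by t_maxpeakrate under
 * NETFLIX_PEAKRATE) if those are set.
 */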
1846 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
1847 /* Averaging is done, we can return the value */
1848 bw = rack->r_ctl.gp_bw;
1850 /* Still doing initial average must calculate */
1851 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_measurements;
1853 #ifdef NETFLIX_PEAKRATE
1854 if ((rack->rc_tp->t_maxpeakrate) &&
1855 (bw > rack->rc_tp->t_maxpeakrate)) {
1856 /* The user has set a peak rate to pace at;
1857 * don't allow us to pace faster than that.
1859 return (rack->rc_tp->t_maxpeakrate);
1862 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap))
1863 bw = rack->r_ctl.bw_rate_cap;
1869 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm)
1871 if (rack->use_fixed_rate) {
1873 } else if (rack->in_probe_rtt && (rsm == NULL))
1874 return (rack->r_ctl.rack_per_of_gp_probertt);
1875 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) &&
1876 rack->r_ctl.rack_per_of_gp_rec)) {
1878 /* a retransmission always uses the recovery rate */
1879 return (rack->r_ctl.rack_per_of_gp_rec);
1880 } else if (rack->rack_rec_nonrxt_use_cr) {
1881 /* Directed to use the configured rate */
1882 goto configured_rate;
1883 } else if (rack->rack_no_prr &&
1884 (rack->r_ctl.rack_per_of_gp_rec > 100)) {
1885 /* No PRR, let's just use the b/w estimate only */
1889 * Here we may have a non-retransmit but we
1890 * have no overrides, so just use the recovery
1891 * rate (prr is in effect).
1893 return (rack->r_ctl.rack_per_of_gp_rec);
1897 /* For the configured rate we look at our cwnd vs the ssthresh */
1898 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
1899 return (rack->r_ctl.rack_per_of_gp_ss);
1901 return (rack->r_ctl.rack_per_of_gp_ca);
1905 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6)
1908 * Types of logs (mod value)
1909 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit.
1910 * 2 = a dsack round begins, persist is reset to 16.
1911 * 3 = a dsack round ends
1912 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh
1913 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack
1914 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh.
1916 if (tcp_bblogging_on(rack->rc_tp)) {
1917 union tcp_log_stackspecific log;
1920 memset(&log, 0, sizeof(log));
1921 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based;
1922 log.u_bbr.flex1 <<= 1;
1923 log.u_bbr.flex1 |= rack->rc_rack_use_dsack;
1924 log.u_bbr.flex1 <<= 1;
1925 log.u_bbr.flex1 |= rack->rc_dsack_round_seen;
1926 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end;
1927 log.u_bbr.flex3 = rack->r_ctl.num_dsack;
1928 log.u_bbr.flex4 = flex4;
1929 log.u_bbr.flex5 = flex5;
1930 log.u_bbr.flex6 = flex6;
1931 log.u_bbr.flex7 = rack->r_ctl.dsack_persist;
1932 log.u_bbr.flex8 = mod;
1933 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1934 TCP_LOG_EVENTP(rack->rc_tp, NULL,
1935 &rack->rc_inp->inp_socket->so_rcv,
1936 &rack->rc_inp->inp_socket->so_snd,
1937 RACK_DSACK_HANDLING, 0,
1938 0, &log, false, &tv);
1943 rack_log_hdwr_pacing(struct tcp_rack *rack,
1944 uint64_t rate, uint64_t hw_rate, int line,
1945 int error, uint16_t mod)
1947 if (tcp_bblogging_on(rack->rc_tp)) {
1948 union tcp_log_stackspecific log;
1950 const struct ifnet *ifp;
1952 memset(&log, 0, sizeof(log));
1953 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff);
1954 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff);
1955 if (rack->r_ctl.crte) {
1956 ifp = rack->r_ctl.crte->ptbl->rs_ifp;
1957 } else if (rack->rc_inp->inp_route.ro_nh &&
1958 rack->rc_inp->inp_route.ro_nh->nh_ifp) {
1959 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp;
1963 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff);
1964 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff);
1966 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1967 log.u_bbr.bw_inuse = rate;
1968 log.u_bbr.flex5 = line;
1969 log.u_bbr.flex6 = error;
1970 log.u_bbr.flex7 = mod;
1971 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs;
1972 log.u_bbr.flex8 = rack->use_fixed_rate;
1973 log.u_bbr.flex8 <<= 1;
1974 log.u_bbr.flex8 |= rack->rack_hdrw_pacing;
1975 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
1976 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate;
1977 if (rack->r_ctl.crte)
1978 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate;
1980 log.u_bbr.cur_del_rate = 0;
1981 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req;
1982 TCP_LOG_EVENTP(rack->rc_tp, NULL,
1983 &rack->rc_inp->inp_socket->so_rcv,
1984 &rack->rc_inp->inp_socket->so_snd,
1985 BBR_LOG_HDWR_PACE, 0,
1986 0, &log, false, &tv);
1991 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped)
1994 * We allow rack_per_of_gp_xx to dictate the b/w rate we want.
1996 uint64_t bw_est, high_rate;
1999 gain = (uint64_t)rack_get_output_gain(rack, rsm);
2000 bw_est = bw * gain;
2001 bw_est /= (uint64_t)100;
2002 /* Never fall below the minimum (def 64kbps) */
2003 if (bw_est < RACK_MIN_BW)
2004 bw_est = RACK_MIN_BW;
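/*
 * Editor's note: the percentage math above in short (illustrative numbers
 * only): with bw == 1000000 bytes/sec and a gain of 275 returned by
 * rack_get_output_gain(), bw_est = 1000000 * 275 / 100 = 2750000 bytes/sec,
 * floored at RACK_MIN_BW.  Any hardware rate caps are applied below.
 */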
2005 if (rack->r_rack_hw_rate_caps) {
2006 /* Rate caps are in place */
2007 if (rack->r_ctl.crte != NULL) {
2008 /* We have a hdwr rate already */
2009 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte);
2010 if (bw_est >= high_rate) {
2011 /* We are capping bw at the highest rate table entry */
2012 rack_log_hdwr_pacing(rack,
2013 bw_est, high_rate, __LINE__,
2019 } else if ((rack->rack_hdrw_pacing == 0) &&
2020 (rack->rack_hdw_pace_ena) &&
2021 (rack->rack_attempt_hdwr_pace == 0) &&
2022 (rack->rc_inp->inp_route.ro_nh != NULL) &&
2023 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
2025 * Special case: we have not yet attempted hardware
2026 * pacing, and when we do we may find that we are
2027 * above the highest rate. We need to know the maxbw for the interface
2028 * in question (if it supports ratelimiting). We get back
2029 * a 0 if the interface is not found in the RL lists.
2031 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp);
2033 /* Yep, we have a rate; is bw_est above it? */
2034 if (bw_est > high_rate) {
2046 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod)
2048 if (tcp_bblogging_on(rack->rc_tp)) {
2049 union tcp_log_stackspecific log;
2052 if ((mod != 1) && (rack_verbose_logging == 0)) {
2054 * We get 3 values currently for mod
2055 * 1 - We are retransmitting and this tells the reason.
2056 * 2 - We are clearing a dup-ack count.
2057 * 3 - We are incrementing a dup-ack count.
2059 * The clear/increment are only logged
2060 * if you have BBverbose on.
2064 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2065 log.u_bbr.flex1 = tsused;
2066 log.u_bbr.flex2 = thresh;
2067 log.u_bbr.flex3 = rsm->r_flags;
2068 log.u_bbr.flex4 = rsm->r_dupack;
2069 log.u_bbr.flex5 = rsm->r_start;
2070 log.u_bbr.flex6 = rsm->r_end;
2071 log.u_bbr.flex8 = mod;
2072 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2073 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2074 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2075 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2076 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2077 log.u_bbr.pacing_gain = rack->r_must_retran;
2078 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2079 &rack->rc_inp->inp_socket->so_rcv,
2080 &rack->rc_inp->inp_socket->so_snd,
2081 BBR_LOG_SETTINGS_CHG, 0,
2082 0, &log, false, &tv);
2087 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
2089 if (tcp_bblogging_on(rack->rc_tp)) {
2090 union tcp_log_stackspecific log;
2093 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2094 log.u_bbr.flex1 = rack->rc_tp->t_srtt;
2095 log.u_bbr.flex2 = to;
2096 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
2097 log.u_bbr.flex4 = slot;
2098 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot;
2099 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2100 log.u_bbr.flex7 = rack->rc_in_persist;
2101 log.u_bbr.flex8 = which;
2102 if (rack->rack_no_prr)
2103 log.u_bbr.pkts_out = 0;
2105 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
2106 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2107 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2108 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2109 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2110 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2111 log.u_bbr.pacing_gain = rack->r_must_retran;
2112 log.u_bbr.cwnd_gain = rack->rc_has_collapsed;
2113 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift;
2114 log.u_bbr.lost = rack_rto_min;
2115 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2116 &rack->rc_inp->inp_socket->so_rcv,
2117 &rack->rc_inp->inp_socket->so_snd,
2118 BBR_LOG_TIMERSTAR, 0,
2119 0, &log, false, &tv);
2124 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm)
2126 if (tcp_bblogging_on(rack->rc_tp)) {
2127 union tcp_log_stackspecific log;
2130 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2131 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2132 log.u_bbr.flex8 = to_num;
2133 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt;
2134 log.u_bbr.flex2 = rack->rc_rack_rtt;
2136 log.u_bbr.flex3 = 0;
2138 log.u_bbr.flex3 = rsm->r_end - rsm->r_start;
2139 if (rack->rack_no_prr)
2140 log.u_bbr.flex5 = 0;
2142 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2143 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2144 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2145 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2146 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2147 log.u_bbr.pacing_gain = rack->r_must_retran;
2148 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2149 &rack->rc_inp->inp_socket->so_rcv,
2150 &rack->rc_inp->inp_socket->so_snd,
2152 0, &log, false, &tv);
2157 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack,
2158 struct rack_sendmap *prev,
2159 struct rack_sendmap *rsm,
2160 struct rack_sendmap *next,
2161 int flag, uint32_t th_ack, int line)
2163 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
2164 union tcp_log_stackspecific log;
2167 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2168 log.u_bbr.flex8 = flag;
2169 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2170 log.u_bbr.cur_del_rate = (uint64_t)prev;
2171 log.u_bbr.delRate = (uint64_t)rsm;
2172 log.u_bbr.rttProp = (uint64_t)next;
2173 log.u_bbr.flex7 = 0;
2175 log.u_bbr.flex1 = prev->r_start;
2176 log.u_bbr.flex2 = prev->r_end;
2177 log.u_bbr.flex7 |= 0x4;
2180 log.u_bbr.flex3 = rsm->r_start;
2181 log.u_bbr.flex4 = rsm->r_end;
2182 log.u_bbr.flex7 |= 0x2;
2185 log.u_bbr.flex5 = next->r_start;
2186 log.u_bbr.flex6 = next->r_end;
2187 log.u_bbr.flex7 |= 0x1;
2189 log.u_bbr.applimited = line;
2190 log.u_bbr.pkts_out = th_ack;
2191 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2192 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2193 if (rack->rack_no_prr)
2196 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt;
2197 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2198 &rack->rc_inp->inp_socket->so_rcv,
2199 &rack->rc_inp->inp_socket->so_snd,
2201 0, &log, false, &tv);
2206 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len,
2207 struct rack_sendmap *rsm, int conf)
2209 if (tcp_bblogging_on(tp)) {
2210 union tcp_log_stackspecific log;
2212 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2213 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2214 log.u_bbr.flex1 = t;
2215 log.u_bbr.flex2 = len;
2216 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt;
2217 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest;
2218 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest;
2219 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt;
2220 log.u_bbr.flex7 = conf;
2221 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot;
2222 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method;
2223 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2224 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt;
2225 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags;
2226 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2228 log.u_bbr.pkt_epoch = rsm->r_start;
2229 log.u_bbr.lost = rsm->r_end;
2230 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt;
2231 /* We lose any upper bits of the 24-bit flags here */
2232 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags;
2235 log.u_bbr.pkt_epoch = rack->rc_tp->iss;
2237 log.u_bbr.cwnd_gain = 0;
2238 log.u_bbr.pacing_gain = 0;
2240 /* Write out general bits of interest rrs here */
2241 log.u_bbr.use_lt_bw = rack->rc_highly_buffered;
2242 log.u_bbr.use_lt_bw <<= 1;
2243 log.u_bbr.use_lt_bw |= rack->forced_ack;
2244 log.u_bbr.use_lt_bw <<= 1;
2245 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul;
2246 log.u_bbr.use_lt_bw <<= 1;
2247 log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
2248 log.u_bbr.use_lt_bw <<= 1;
2249 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
2250 log.u_bbr.use_lt_bw <<= 1;
2251 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
2252 log.u_bbr.use_lt_bw <<= 1;
2253 log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
2254 log.u_bbr.use_lt_bw <<= 1;
2255 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom;
2256 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight;
2257 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts;
2258 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered;
2259 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts;
2260 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt;
2261 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
2262 log.u_bbr.bw_inuse <<= 32;
2264 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]);
2265 TCP_LOG_EVENTP(tp, NULL,
2266 &rack->rc_inp->inp_socket->so_rcv,
2267 &rack->rc_inp->inp_socket->so_snd,
2269 0, &log, false, &tv);
2276 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt)
2279 * Log the rtt sample we are
2280 * applying to the srtt algorithm in
2283 if (tcp_bblogging_on(rack->rc_tp)) {
2284 union tcp_log_stackspecific log;
2287 /* Convert our ms to a microsecond */
2288 memset(&log, 0, sizeof(log));
2289 log.u_bbr.flex1 = rtt;
2290 log.u_bbr.flex2 = rack->r_ctl.ack_count;
2291 log.u_bbr.flex3 = rack->r_ctl.sack_count;
2292 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
2293 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra;
2294 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2295 log.u_bbr.flex7 = 1;
2296 log.u_bbr.flex8 = rack->sack_attack_disable;
2297 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2298 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2299 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2300 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2301 log.u_bbr.pacing_gain = rack->r_must_retran;
2303 * We capture in delRate the upper 32 bits as
2304 * the confidence level we had declared, and the
2305 * lower 32 bits as the actual RTT using the arrival
2308 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence;
2309 log.u_bbr.delRate <<= 32;
2310 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt;
2311 /* Lets capture all the things that make up t_rtxcur */
2312 log.u_bbr.applimited = rack_rto_min;
2313 log.u_bbr.epoch = rack_rto_max;
2314 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop;
2315 log.u_bbr.lost = rack_rto_min;
2316 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop);
2317 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp);
2318 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec;
2319 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC;
2320 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec;
2321 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2322 &rack->rc_inp->inp_socket->so_rcv,
2323 &rack->rc_inp->inp_socket->so_snd,
2325 0, &log, false, &tv);
2330 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where)
2332 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
2333 union tcp_log_stackspecific log;
2336 /* Convert our ms to a microsecond */
2337 memset(&log, 0, sizeof(log));
2338 log.u_bbr.flex1 = rtt;
2339 log.u_bbr.flex2 = send_time;
2340 log.u_bbr.flex3 = ack_time;
2341 log.u_bbr.flex4 = where;
2342 log.u_bbr.flex7 = 2;
2343 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2344 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2345 &rack->rc_inp->inp_socket->so_rcv,
2346 &rack->rc_inp->inp_socket->so_snd,
2348 0, &log, false, &tv);
2355 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line)
2357 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
2358 union tcp_log_stackspecific log;
2361 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2362 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2363 log.u_bbr.flex1 = line;
2364 log.u_bbr.flex2 = tick;
2365 log.u_bbr.flex3 = tp->t_maxunacktime;
2366 log.u_bbr.flex4 = tp->t_acktime;
2367 log.u_bbr.flex8 = event;
2368 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2369 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2370 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2371 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2372 log.u_bbr.pacing_gain = rack->r_must_retran;
2373 TCP_LOG_EVENTP(tp, NULL,
2374 &rack->rc_inp->inp_socket->so_rcv,
2375 &rack->rc_inp->inp_socket->so_snd,
2376 BBR_LOG_PROGRESS, 0,
2377 0, &log, false, &tv);
2382 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv)
2384 if (tcp_bblogging_on(rack->rc_tp)) {
2385 union tcp_log_stackspecific log;
2387 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2388 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2389 log.u_bbr.flex1 = slot;
2390 if (rack->rack_no_prr)
2391 log.u_bbr.flex2 = 0;
2393 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt;
2394 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags);
2395 log.u_bbr.flex8 = rack->rc_in_persist;
2396 log.u_bbr.timeStamp = cts;
2397 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2398 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2399 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2400 log.u_bbr.pacing_gain = rack->r_must_retran;
2401 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2402 &rack->rc_inp->inp_socket->so_rcv,
2403 &rack->rc_inp->inp_socket->so_snd,
2405 0, &log, false, tv);
2410 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs)
2412 if (tcp_bblogging_on(rack->rc_tp)) {
2413 union tcp_log_stackspecific log;
2416 memset(&log, 0, sizeof(log));
2417 log.u_bbr.flex1 = did_out;
2418 log.u_bbr.flex2 = nxt_pkt;
2419 log.u_bbr.flex3 = way_out;
2420 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
2421 if (rack->rack_no_prr)
2422 log.u_bbr.flex5 = 0;
2424 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2425 log.u_bbr.flex6 = nsegs;
2426 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs;
2427 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */
2428 log.u_bbr.flex7 <<= 1;
2429 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */
2430 log.u_bbr.flex7 <<= 1;
2431 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */
2432 log.u_bbr.flex8 = rack->rc_in_persist;
2433 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2434 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2435 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2436 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
2437 log.u_bbr.use_lt_bw <<= 1;
2438 log.u_bbr.use_lt_bw |= rack->r_might_revert;
2439 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2440 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2441 log.u_bbr.pacing_gain = rack->r_must_retran;
2442 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2443 &rack->rc_inp->inp_socket->so_rcv,
2444 &rack->rc_inp->inp_socket->so_snd,
2445 BBR_LOG_DOSEG_DONE, 0,
2446 0, &log, false, &tv);
2451 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm)
2453 if (tcp_bblogging_on(rack->rc_tp)) {
2454 union tcp_log_stackspecific log;
2457 memset(&log, 0, sizeof(log));
2458 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs;
2459 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
2460 log.u_bbr.flex4 = arg1;
2461 log.u_bbr.flex5 = arg2;
2462 log.u_bbr.flex6 = arg3;
2463 log.u_bbr.flex8 = frm;
2464 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2465 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2466 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2467 log.u_bbr.applimited = rack->r_ctl.rc_sacked;
2468 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2469 log.u_bbr.pacing_gain = rack->r_must_retran;
2470 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv,
2471 &tptosocket(tp)->so_snd,
2472 TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv);
2477 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot,
2478 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use)
2480 if (tcp_bblogging_on(rack->rc_tp)) {
2481 union tcp_log_stackspecific log;
2484 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2485 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2486 log.u_bbr.flex1 = slot;
2487 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
2488 log.u_bbr.flex4 = reason;
2489 if (rack->rack_no_prr)
2490 log.u_bbr.flex5 = 0;
2492 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2493 log.u_bbr.flex7 = hpts_calling;
2494 log.u_bbr.flex8 = rack->rc_in_persist;
2495 log.u_bbr.lt_epoch = cwnd_to_use;
2496 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2497 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2498 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2499 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2500 log.u_bbr.pacing_gain = rack->r_must_retran;
2501 log.u_bbr.cwnd_gain = rack->rc_has_collapsed;
2502 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2503 &rack->rc_inp->inp_socket->so_rcv,
2504 &rack->rc_inp->inp_socket->so_snd,
2506 tlen, &log, false, &tv);
2511 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts,
2512 struct timeval *tv, uint32_t flags_on_entry)
2514 if (tcp_bblogging_on(rack->rc_tp)) {
2515 union tcp_log_stackspecific log;
2517 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2518 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2519 log.u_bbr.flex1 = line;
2520 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to;
2521 log.u_bbr.flex3 = flags_on_entry;
2522 log.u_bbr.flex4 = us_cts;
2523 if (rack->rack_no_prr)
2524 log.u_bbr.flex5 = 0;
2526 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2527 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2528 log.u_bbr.flex7 = hpts_removed;
2529 log.u_bbr.flex8 = 1;
2530 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags;
2531 log.u_bbr.timeStamp = us_cts;
2532 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2533 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2534 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2535 log.u_bbr.pacing_gain = rack->r_must_retran;
2536 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2537 &rack->rc_inp->inp_socket->so_rcv,
2538 &rack->rc_inp->inp_socket->so_snd,
2539 BBR_LOG_TIMERCANC, 0,
2540 0, &log, false, tv);
2545 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
2546 uint32_t flex1, uint32_t flex2,
2547 uint32_t flex3, uint32_t flex4,
2548 uint32_t flex5, uint32_t flex6,
2549 uint16_t flex7, uint8_t mod)
2551 if (tcp_bblogging_on(rack->rc_tp)) {
2552 union tcp_log_stackspecific log;
2556 /* No, you can't use 1; it's for the real timeout cancel */
2559 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2560 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2561 log.u_bbr.flex1 = flex1;
2562 log.u_bbr.flex2 = flex2;
2563 log.u_bbr.flex3 = flex3;
2564 log.u_bbr.flex4 = flex4;
2565 log.u_bbr.flex5 = flex5;
2566 log.u_bbr.flex6 = flex6;
2567 log.u_bbr.flex7 = flex7;
2568 log.u_bbr.flex8 = mod;
2569 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2570 &rack->rc_inp->inp_socket->so_rcv,
2571 &rack->rc_inp->inp_socket->so_snd,
2572 BBR_LOG_TIMERCANC, 0,
2573 0, &log, false, &tv);
2578 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers)
2580 if (tcp_bblogging_on(rack->rc_tp)) {
2581 union tcp_log_stackspecific log;
2584 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2585 log.u_bbr.flex1 = timers;
2586 log.u_bbr.flex2 = ret;
2587 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp;
2588 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
2589 log.u_bbr.flex5 = cts;
2590 if (rack->rack_no_prr)
2591 log.u_bbr.flex6 = 0;
2593 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt;
2594 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2595 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2596 log.u_bbr.pacing_gain = rack->r_must_retran;
2597 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2598 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2599 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2600 &rack->rc_inp->inp_socket->so_rcv,
2601 &rack->rc_inp->inp_socket->so_snd,
2602 BBR_LOG_TO_PROCESS, 0,
2603 0, &log, false, &tv);
2608 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line)
2610 if (tcp_bblogging_on(rack->rc_tp)) {
2611 union tcp_log_stackspecific log;
2614 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2615 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out;
2616 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs;
2617 if (rack->rack_no_prr)
2618 log.u_bbr.flex3 = 0;
2620 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt;
2621 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered;
2622 log.u_bbr.flex5 = rack->r_ctl.rc_sacked;
2623 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt;
2624 log.u_bbr.flex7 = line;
2625 log.u_bbr.flex8 = frm;
2626 log.u_bbr.pkts_out = orig_cwnd;
2627 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2628 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2629 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
2630 log.u_bbr.use_lt_bw <<= 1;
2631 log.u_bbr.use_lt_bw |= rack->r_might_revert;
2632 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2633 &rack->rc_inp->inp_socket->so_rcv,
2634 &rack->rc_inp->inp_socket->so_snd,
2636 0, &log, false, &tv);
2640 #ifdef NETFLIX_EXP_DETECTION
2642 rack_log_sad(struct tcp_rack *rack, int event)
2644 if (tcp_bblogging_on(rack->rc_tp)) {
2645 union tcp_log_stackspecific log;
2648 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2649 log.u_bbr.flex1 = rack->r_ctl.sack_count;
2650 log.u_bbr.flex2 = rack->r_ctl.ack_count;
2651 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra;
2652 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
2653 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced;
2654 log.u_bbr.flex6 = tcp_sack_to_ack_thresh;
2655 log.u_bbr.pkts_out = tcp_sack_to_move_thresh;
2656 log.u_bbr.lt_epoch = (tcp_force_detection << 8);
2657 log.u_bbr.lt_epoch |= rack->do_detection;
2658 log.u_bbr.applimited = tcp_map_minimum;
2659 log.u_bbr.flex7 = rack->sack_attack_disable;
2660 log.u_bbr.flex8 = event;
2661 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2662 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2663 log.u_bbr.delivered = tcp_sad_decay_val;
2664 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2665 &rack->rc_inp->inp_socket->so_rcv,
2666 &rack->rc_inp->inp_socket->so_snd,
2667 TCP_SAD_DETECTION, 0,
2668 0, &log, false, &tv);
2674 rack_counter_destroy(void)
2676 counter_u64_free(rack_fto_send);
2677 counter_u64_free(rack_fto_rsm_send);
2678 counter_u64_free(rack_nfto_resend);
2679 counter_u64_free(rack_hw_pace_init_fail);
2680 counter_u64_free(rack_hw_pace_lost);
2681 counter_u64_free(rack_non_fto_send);
2682 counter_u64_free(rack_extended_rfo);
2683 counter_u64_free(rack_ack_total);
2684 counter_u64_free(rack_express_sack);
2685 counter_u64_free(rack_sack_total);
2686 counter_u64_free(rack_move_none);
2687 counter_u64_free(rack_move_some);
2688 counter_u64_free(rack_sack_attacks_detected);
2689 counter_u64_free(rack_sack_attacks_reversed);
2690 counter_u64_free(rack_sack_used_next_merge);
2691 counter_u64_free(rack_sack_used_prev_merge);
2692 counter_u64_free(rack_tlp_tot);
2693 counter_u64_free(rack_tlp_newdata);
2694 counter_u64_free(rack_tlp_retran);
2695 counter_u64_free(rack_tlp_retran_bytes);
2696 counter_u64_free(rack_to_tot);
2697 counter_u64_free(rack_saw_enobuf);
2698 counter_u64_free(rack_saw_enobuf_hw);
2699 counter_u64_free(rack_saw_enetunreach);
2700 counter_u64_free(rack_hot_alloc);
2701 counter_u64_free(rack_to_alloc);
2702 counter_u64_free(rack_to_alloc_hard);
2703 counter_u64_free(rack_to_alloc_emerg);
2704 counter_u64_free(rack_to_alloc_limited);
2705 counter_u64_free(rack_alloc_limited_conns);
2706 counter_u64_free(rack_split_limited);
2707 counter_u64_free(rack_multi_single_eq);
2708 counter_u64_free(rack_proc_non_comp_ack);
2709 counter_u64_free(rack_sack_proc_all);
2710 counter_u64_free(rack_sack_proc_restart);
2711 counter_u64_free(rack_sack_proc_short);
2712 counter_u64_free(rack_sack_skipped_acked);
2713 counter_u64_free(rack_sack_splits);
2714 counter_u64_free(rack_input_idle_reduces);
2715 counter_u64_free(rack_collapsed_win);
2716 counter_u64_free(rack_collapsed_win_rxt);
2717 counter_u64_free(rack_collapsed_win_rxt_bytes);
2718 counter_u64_free(rack_collapsed_win_seen);
2719 counter_u64_free(rack_try_scwnd);
2720 counter_u64_free(rack_persists_sends);
2721 counter_u64_free(rack_persists_acks);
2722 counter_u64_free(rack_persists_loss);
2723 counter_u64_free(rack_persists_lost_ends);
2725 counter_u64_free(rack_adjust_map_bw);
2727 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE);
2728 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE);
2731 static struct rack_sendmap *
2732 rack_alloc(struct tcp_rack *rack)
2734 struct rack_sendmap *rsm;
2737 * First get the top of the list; in
2738 * theory it is the "hottest" rsm we have,
2739 * possibly just freed by ack processing.
2741 if (rack->rc_free_cnt > rack_free_cache) {
2742 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
2743 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
2744 counter_u64_add(rack_hot_alloc, 1);
2745 rack->rc_free_cnt--;
2749 * Once we get under our free cache we probably
2750 * no longer have a "hot" one available. Let's get one from UMA.
2753 rsm = uma_zalloc(rack_zone, M_NOWAIT);
2755 rack->r_ctl.rc_num_maps_alloced++;
2756 counter_u64_add(rack_to_alloc, 1);
2760 * Dig into our aux rsm's (the last two) since
2761 * UMA failed to get us one.
2763 if (rack->rc_free_cnt) {
2764 counter_u64_add(rack_to_alloc_emerg, 1);
2765 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
2766 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
2767 rack->rc_free_cnt--;
2773 static struct rack_sendmap *
2774 rack_alloc_full_limit(struct tcp_rack *rack)
2776 if ((V_tcp_map_entries_limit > 0) &&
2777 (rack->do_detection == 0) &&
2778 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
2779 counter_u64_add(rack_to_alloc_limited, 1);
2780 if (!rack->alloc_limit_reported) {
2781 rack->alloc_limit_reported = 1;
2782 counter_u64_add(rack_alloc_limited_conns, 1);
2786 return (rack_alloc(rack));
2789 /* wrapper to allocate a sendmap entry, subject to a specific limit */
2790 static struct rack_sendmap *
2791 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type)
2793 struct rack_sendmap *rsm;
2796 /* currently there is only one limit type */
2797 if (V_tcp_map_split_limit > 0 &&
2798 (rack->do_detection == 0) &&
2799 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) {
2800 counter_u64_add(rack_split_limited, 1);
2801 if (!rack->alloc_limit_reported) {
2802 rack->alloc_limit_reported = 1;
2803 counter_u64_add(rack_alloc_limited_conns, 1);
2809 /* allocate and mark in the limit type, if set */
2810 rsm = rack_alloc(rack);
2811 if (rsm != NULL && limit_type) {
2812 rsm->r_limit_type = limit_type;
2813 rack->r_ctl.rc_num_split_allocs++;
2819 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
2821 if (rsm->r_flags & RACK_APP_LIMITED) {
2822 if (rack->r_ctl.rc_app_limited_cnt > 0) {
2823 rack->r_ctl.rc_app_limited_cnt--;
2826 if (rsm->r_limit_type) {
2827 /* currently there is only one limit type */
2828 rack->r_ctl.rc_num_split_allocs--;
2830 if (rsm == rack->r_ctl.rc_first_appl) {
2831 if (rack->r_ctl.rc_app_limited_cnt == 0)
2832 rack->r_ctl.rc_first_appl = NULL;
2834 /* Follow the next one out */
2835 struct rack_sendmap fe;
2837 fe.r_start = rsm->r_nseq_appl;
2838 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
2841 if (rsm == rack->r_ctl.rc_resend)
2842 rack->r_ctl.rc_resend = NULL;
2843 if (rsm == rack->r_ctl.rc_end_appl)
2844 rack->r_ctl.rc_end_appl = NULL;
2845 if (rack->r_ctl.rc_tlpsend == rsm)
2846 rack->r_ctl.rc_tlpsend = NULL;
2847 if (rack->r_ctl.rc_sacklast == rsm)
2848 rack->r_ctl.rc_sacklast = NULL;
2849 memset(rsm, 0, sizeof(struct rack_sendmap));
2850 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext);
2851 rack->rc_free_cnt++;
2855 rack_free_trim(struct tcp_rack *rack)
2857 struct rack_sendmap *rsm;
2860 * Free up all the tail entries until
2861 * we get our list down to the limit.
2863 while (rack->rc_free_cnt > rack_free_cache) {
2864 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head);
2865 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
2866 rack->rc_free_cnt--;
2867 uma_zfree(rack_zone, rsm);
2873 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack)
2875 uint64_t srtt, bw, len, tim;
2876 uint32_t segsiz, def_len, minl;
2878 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
2879 def_len = rack_def_data_window * segsiz;
2880 if (rack->rc_gp_filled == 0) {
2882 * We have no measurement (IW is in flight?) so
2883 * we can only guess using our data_window sysctl
2884 * value (usually 20MSS).
2889 * Now we have a number of factors to consider.
2891 * 1) We have a desired BDP which is usually
2893 * 2) We have a minimum number of rtt's usually 1 SRTT
2894 * but we allow it too to be more.
2895 * 3) We want to make sure a measurement lasts N useconds (if
2896 * we have set rack_min_measure_usec).
2898 * We handle the first concern here by trying to create a data
2899 * window of max(rack_def_data_window, DesiredBDP). The
2900 * second concern we handle in not letting the measurement
2901 * window end normally until at least the required SRTT's
2902 * have gone by which is done further below in
2903 * rack_enough_for_measurement(). Finally the third concern
2904 * we also handle here by calculating how long that time
2905 * would take at the current BW and then return the
2906 * max of our first calculation and that length. Note
2907 * that if rack_min_measure_usec is 0, we don't deal
2908 * with concern 3. Also, for both concerns 1 and 3, an
2909 * application limited period could end the measurement early.
2912 * So let's calculate the BDP with the "known" b/w using
2913 * the SRTT as our rtt and then multiply it by the goal.
2916 bw = rack_get_bw(rack);
2917 srtt = (uint64_t)tp->t_srtt;
2918 len = bw * srtt;
2919 len /= (uint64_t)HPTS_USEC_IN_SEC;
2920 len *= max(1, rack_goal_bdp);
2921 /* Now we need to round up to the nearest MSS */
2922 len = roundup(len, segsiz);
2923 if (rack_min_measure_usec) {
2924 /* Now calculate our min length for this b/w */
2925 tim = rack_min_measure_usec;
2926 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC;
2929 minl = roundup(minl, segsiz);
2934 * Now if we have a very small window we want
2935 * to attempt to get the window that is
2936 * as small as possible. This happens on
2937 * low b/w connections and we don't want to
2938 * span huge numbers of rtt's between measurements.
2940 * We basically include 2 over our "MIN window" so
2941 * that the measurement can be shortened (possibly) by
2945 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz)));
2947 return (max((uint32_t)len, def_len));
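/*
 * Editor's sketch of the BDP sizing above (illustrative values): with a
 * measured bw of 1250000 bytes/sec (10 Mbps), t_srtt == 40000 usec and
 * rack_goal_bdp == 2, the desired window is
 *
 *   len  = 1250000 * 40000 / HPTS_USEC_IN_SEC = 50000 bytes (one BDP)
 *   len *= 2                                  = 100000 bytes
 *   len  = roundup(len, segsiz)
 *
 * and on this path the return value is never allowed to drop below
 * def_len (rack_def_data_window * segsiz).
 */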
2952 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality)
2954 uint32_t tim, srtts, segsiz;
2957 * Has enough time passed for the GP measurement to be valid?
2959 if ((tp->snd_max == tp->snd_una) ||
2960 (th_ack == tp->snd_max)){
2962 *quality = RACK_QUALITY_ALLACKED;
2965 if (SEQ_LT(th_ack, tp->gput_seq)) {
2966 /* Not enough bytes yet */
2969 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
2970 if (SEQ_LT(th_ack, tp->gput_ack) &&
2971 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
2972 /* Not enough bytes yet */
2975 if (rack->r_ctl.rc_first_appl &&
2976 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) {
2978 * We are up to the app limited send point,
2979 * so we have to measure irrespective of the time.
2981 *quality = RACK_QUALITY_APPLIMITED;
2984 /* Now what about time? */
2985 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts);
2986 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts;
2988 *quality = RACK_QUALITY_HIGH;
2991 /* Nope not even a full SRTT has passed */
2996 rack_log_timely(struct tcp_rack *rack,
2997 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd,
2998 uint64_t up_bnd, int line, uint8_t method)
3000 if (tcp_bblogging_on(rack->rc_tp)) {
3001 union tcp_log_stackspecific log;
3004 memset(&log, 0, sizeof(log));
3005 log.u_bbr.flex1 = logged;
3006 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt;
3007 log.u_bbr.flex2 <<= 4;
3008 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt;
3009 log.u_bbr.flex2 <<= 4;
3010 log.u_bbr.flex2 |= rack->rc_gp_incr;
3011 log.u_bbr.flex2 <<= 4;
3012 log.u_bbr.flex2 |= rack->rc_gp_bwred;
3013 log.u_bbr.flex3 = rack->rc_gp_incr;
3014 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
3015 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca;
3016 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec;
3017 log.u_bbr.flex7 = rack->rc_gp_bwred;
3018 log.u_bbr.flex8 = method;
3019 log.u_bbr.cur_del_rate = cur_bw;
3020 log.u_bbr.delRate = low_bnd;
3021 log.u_bbr.bw_inuse = up_bnd;
3022 log.u_bbr.rttProp = rack_get_bw(rack);
3023 log.u_bbr.pkt_epoch = line;
3024 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
3025 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3026 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3027 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
3028 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
3029 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom;
3030 log.u_bbr.cwnd_gain <<= 1;
3031 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec;
3032 log.u_bbr.cwnd_gain <<= 1;
3033 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
3034 log.u_bbr.cwnd_gain <<= 1;
3035 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
3036 log.u_bbr.lost = rack->r_ctl.rc_loss_count;
3037 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3038 &rack->rc_inp->inp_socket->so_rcv,
3039 &rack->rc_inp->inp_socket->so_snd,
3041 0, &log, false, &tv);
3046 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult)
3049 * Before we increase we need to know if
3050 * the estimate just made was less than
3051 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est).
3053 * If we already are pacing at a fast enough
3054 * rate to push us faster, there is no sense in increasing it.
3057 * We first calculate our actual pacing rate (ss or ca multiplier
3058 * times our cur_bw).
3060 * Then we take the last measured rate and multiply by our
3061 * maximum pacing overage to give us a max allowable rate.
3063 * If our act_rate is smaller than our max_allowable rate
3064 * then we should increase. Else we should hold steady.
3067 uint64_t act_rate, max_allow_rate;
3069 if (rack_timely_no_stopping)
3072 if ((cur_bw == 0) || (last_bw_est == 0)) {
3074 * Initial startup case or
3075 * everything is acked case.
3077 rack_log_timely(rack, mult, cur_bw, 0, 0,
3083 * We can always pace at or slightly above our rate.
3085 rack_log_timely(rack, mult, cur_bw, 0, 0,
3089 act_rate = cur_bw * (uint64_t)mult;
3090 act_rate /= 100;
3091 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100);
3092 max_allow_rate /= 100;
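/*
 * Editor's note, with illustrative numbers: if cur_bw == 1000000 bytes/sec
 * and mult == 275 (a 275% pacing multiplier), act_rate == 2750000.  With
 * last_bw_est == 2600000 and rack_max_per_above == 10, max_allow_rate ==
 * 2600000 * 110 / 100 == 2860000, so act_rate < max_allow_rate and the
 * multiplier may still be raised.  Once the achieved rate stops keeping up,
 * the comparison below flips and we hold steady.
 */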
3093 if (act_rate < max_allow_rate) {
3095 * Here the rate we are actually pacing at
3096 * is smaller than 10% above our last measurement.
3097 * This means we are pacing below what we would
3098 * like to try to achieve (plus some wiggle room).
3100 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
3105 * Here we are already pacing at least rack_max_per_above(10%)
3106 * what we are getting back. This indicates most likely
3107 * that we are being limited (cwnd/rwnd/app) and can't
3108 * get any more b/w. There is no sense in trying to
3109 * raise the pacing rate; it's not speeding us up
3110 * and we already are pacing faster than we are getting.
3112 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
3119 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack)
3122 * When we drag bottom, we want to ensure
3123 * that no multiplier is below 1.0; if one is,
3124 * we want to restore it to at least that.
3126 if (rack->r_ctl.rack_per_of_gp_rec < 100) {
3127 /* This is unlikely we usually do not touch recovery */
3128 rack->r_ctl.rack_per_of_gp_rec = 100;
3130 if (rack->r_ctl.rack_per_of_gp_ca < 100) {
3131 rack->r_ctl.rack_per_of_gp_ca = 100;
3133 if (rack->r_ctl.rack_per_of_gp_ss < 100) {
3134 rack->r_ctl.rack_per_of_gp_ss = 100;
3139 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack)
3141 if (rack->r_ctl.rack_per_of_gp_ca > 100) {
3142 rack->r_ctl.rack_per_of_gp_ca = 100;
3144 if (rack->r_ctl.rack_per_of_gp_ss > 100) {
3145 rack->r_ctl.rack_per_of_gp_ss = 100;
3150 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override)
3152 int32_t calc, logged, plus;
3158 * override is passed when we are
3159 * losing b/w and making one last
3160 * gasp at trying not to lose out
3161 * to a new-reno flow.
3165 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */
3166 if (rack->rc_gp_incr &&
3167 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) {
3169 * Reset and get 5 strokes more before the boost. Note
3170 * that the count is 0 based so we have to add one.
3173 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST;
3174 rack->rc_gp_timely_inc_cnt = 0;
3176 plus = (uint32_t)rack_gp_increase_per;
3177 /* Must be at least 1% increase for true timely increases */
3179 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0)))
3181 if (rack->rc_gp_saw_rec &&
3182 (rack->rc_gp_no_rec_chg == 0) &&
3183 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3184 rack->r_ctl.rack_per_of_gp_rec)) {
3185 /* We have been in recovery ding it too */
3186 calc = rack->r_ctl.rack_per_of_gp_rec + plus;
3190 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc;
3191 if (rack_per_upper_bound_ss &&
3192 (rack->rc_dragged_bottom == 0) &&
3193 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss))
3194 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss;
3196 if (rack->rc_gp_saw_ca &&
3197 (rack->rc_gp_saw_ss == 0) &&
3198 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3199 rack->r_ctl.rack_per_of_gp_ca)) {
3201 calc = rack->r_ctl.rack_per_of_gp_ca + plus;
3205 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc;
3206 if (rack_per_upper_bound_ca &&
3207 (rack->rc_dragged_bottom == 0) &&
3208 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca))
3209 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca;
3211 if (rack->rc_gp_saw_ss &&
3212 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3213 rack->r_ctl.rack_per_of_gp_ss)) {
3215 calc = rack->r_ctl.rack_per_of_gp_ss + plus;
3218 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc;
3219 if (rack_per_upper_bound_ss &&
3220 (rack->rc_dragged_bottom == 0) &&
3221 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss))
3222 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss;
3226 (rack->rc_gp_incr == 0)){
3227 /* Go into increment mode */
3228 rack->rc_gp_incr = 1;
3229 rack->rc_gp_timely_inc_cnt = 0;
3231 if (rack->rc_gp_incr &&
3233 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) {
3234 rack->rc_gp_timely_inc_cnt++;
3236 rack_log_timely(rack, logged, plus, 0, 0,
3241 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff)
3244 * norm_grad = rtt_diff / minrtt;
3245 * new_per = curper * (1 - B * norm_grad)
3247 * B = rack_gp_decrease_per (default 10%)
3248 * rtt_diff = input var current rtt-diff
3249 * curper = input var current percentage
3250 * minrtt = from rack filter
3255 perf = (((uint64_t)curper * ((uint64_t)1000000 -
3256 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 *
3257 (((uint64_t)rtt_diff * (uint64_t)1000000)/
3258 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/
3259 (uint64_t)1000000)) /
3261 if (perf > curper) {
3265 return ((uint32_t)perf);
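/*
 * A rough worked example of the rack_get_decrease() math above (values
 * chosen only for illustration, with rack_gp_decrease_per at its
 * documented default of 10): curper = 200, rtt_diff = 5,000 us and a
 * filtered min rtt of 50,000 us gives norm_grad = 0.1, so
 * new_per = 200 * (1 - 0.10 * 0.1) = 198.  The fixed-point expression
 * above computes the same value scaled by 1,000,000.
 */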
3269 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt)
3273 * result = curper * (1 - (B * (1 - highrttthresh/rtt)))
3276 * B = rack_gp_decrease_per (default 10%)
3277 * highrttthresh = filter_min * rack_gp_rtt_maxmul
3280 uint32_t highrttthresh;
3282 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
3284 perf = (((uint64_t)curper * ((uint64_t)1000000 -
3285 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 -
3286 ((uint64_t)highrttthresh * (uint64_t)1000000) /
3287 (uint64_t)rtt)) / 100)) /(uint64_t)1000000);
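/*
 * A rough worked example (values chosen only for illustration, with
 * rack_gp_decrease_per at its documented default of 10): with
 * highrttthresh = 40,000 us, rtt = 80,000 us and curper = 200, the
 * reduction factor is 1 - 0.10 * (1 - 40000/80000) = 0.95, so perf
 * works out to 190.
 */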
3292 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff)
3294 uint64_t logvar, logvar2, logvar3;
3295 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val;
3297 if (rack->rc_gp_incr) {
3298 /* Turn off increment counting */
3299 rack->rc_gp_incr = 0;
3300 rack->rc_gp_timely_inc_cnt = 0;
3302 ss_red = ca_red = rec_red = 0;
3304 /* Calculate the reduction value */
3308 /* Must be at least 1% reduction */
3309 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) {
3310 /* We have been in recovery, ding it too */
3311 if (timely_says == 2) {
3312 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt);
3313 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
3319 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
3320 if (rack->r_ctl.rack_per_of_gp_rec > val) {
3321 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val);
3322 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val;
3324 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
3327 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec)
3328 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
3331 if (rack->rc_gp_saw_ss) {
3333 if (timely_says == 2) {
3334 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt);
3335 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
3341 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
3342 if (rack->r_ctl.rack_per_of_gp_ss > new_per) {
3343 ss_red = rack->r_ctl.rack_per_of_gp_ss - val;
3344 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val;
3347 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
3351 logvar2 = (uint32_t)rtt;
3353 logvar2 |= (uint32_t)rtt_diff;
3354 logvar3 = rack_gp_rtt_maxmul;
3356 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3357 rack_log_timely(rack, timely_says,
3359 logvar, __LINE__, 10);
3361 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss)
3362 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
3364 } else if (rack->rc_gp_saw_ca) {
3366 if (timely_says == 2) {
3367 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt);
3368 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
3374 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
3375 if (rack->r_ctl.rack_per_of_gp_ca > val) {
3376 ca_red = rack->r_ctl.rack_per_of_gp_ca - val;
3377 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val;
3379 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
3384 logvar2 = (uint32_t)rtt;
3386 logvar2 |= (uint32_t)rtt_diff;
3387 logvar3 = rack_gp_rtt_maxmul;
3389 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3390 rack_log_timely(rack, timely_says,
3392 logvar, __LINE__, 10);
3394 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca)
3395 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
3398 if (rack->rc_gp_timely_dec_cnt < 0x7) {
3399 rack->rc_gp_timely_dec_cnt++;
3400 if (rack_timely_dec_clear &&
3401 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear))
3402 rack->rc_gp_timely_dec_cnt = 0;
3407 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar,
3412 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts,
3413 uint32_t rtt, uint32_t line, uint8_t reas)
3415 if (tcp_bblogging_on(rack->rc_tp)) {
3416 union tcp_log_stackspecific log;
3419 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3420 log.u_bbr.flex1 = line;
3421 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts;
3422 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts;
3423 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
3424 log.u_bbr.flex5 = rtt;
3425 log.u_bbr.flex6 = rack->rc_highly_buffered;
3426 log.u_bbr.flex6 <<= 1;
3427 log.u_bbr.flex6 |= rack->forced_ack;
3428 log.u_bbr.flex6 <<= 1;
3429 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul;
3430 log.u_bbr.flex6 <<= 1;
3431 log.u_bbr.flex6 |= rack->in_probe_rtt;
3432 log.u_bbr.flex6 <<= 1;
3433 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt;
3434 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt;
3435 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca;
3436 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec;
3437 log.u_bbr.flex8 = reas;
3438 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3439 log.u_bbr.delRate = rack_get_bw(rack);
3440 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt;
3441 log.u_bbr.cur_del_rate <<= 32;
3442 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt;
3443 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered;
3444 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
3445 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3446 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
3447 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
3448 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts;
3449 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight;
3450 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3451 log.u_bbr.rttProp = us_cts;
3452 log.u_bbr.rttProp <<= 32;
3453 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt;
3454 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3455 &rack->rc_inp->inp_socket->so_rcv,
3456 &rack->rc_inp->inp_socket->so_snd,
3457 BBR_LOG_RTT_SHRINKS, 0,
3458 0, &log, false, &rack->r_ctl.act_rcv_time);
3463 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt)
3467 bwdp = rack_get_bw(rack);
3468 bwdp *= (uint64_t)rtt;
3469 bwdp /= (uint64_t)HPTS_USEC_IN_SEC;
3470 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz);
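	/*
	 * A rough worked example (values chosen only for illustration): a
	 * measured b/w of 1,250,000 bytes/sec and an rtt of 20,000 us gives
	 * bwdp = 25,000 bytes, which rounds up to 26,064 with a 1,448-byte
	 * segsiz.
	 */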
3471 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) {
3473 * A window protocol must be able to have 4 packets
3474 * outstanding as the floor in order to function
3475 * (especially considering delayed ack :D).
3477 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs);
3482 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts)
3485 * ProbeRTT is a bit different in rack_pacing than in
3486 * BBR. It is like BBR in that it uses the lowering of
3487 * the RTT as a signal that we saw something new and
3488 * counts from there to decide how long to go between
3489 * probes. But it is different in that it's quite simple. It does not
3490 * play with the cwnd and wait until we get down
3491 * to N segments outstanding and hold that for
3492 * 200ms. Instead it just sets the pacing reduction
3493 * rate to a set percentage (70 by default) and holds
3494 * that for a number of recent GP Srtt's.
3498 if (rack->rc_gp_dyn_mul == 0)
3501 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) {
3505 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
3506 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
3508 * Stop the goodput now; the idea here is
3509 * that future measurements taken with in_probe_rtt set
3510 * won't register if they are not greater, so
3511 * we want to get what info (if any) is available
3514 rack_do_goodput_measurement(rack->rc_tp, rack,
3515 rack->rc_tp->snd_una, __LINE__,
3516 RACK_QUALITY_PROBERTT);
3518 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
3519 rack->r_ctl.rc_time_probertt_entered = us_cts;
3520 segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
3521 rack->r_ctl.rc_pace_min_segs);
3522 rack->in_probe_rtt = 1;
3523 rack->measure_saw_probe_rtt = 1;
3524 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
3525 rack->r_ctl.rc_time_probertt_starts = 0;
3526 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt;
3527 if (rack_probertt_use_min_rtt_entry)
3528 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
3530 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt);
3531 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3532 __LINE__, RACK_RTTS_ENTERPROBE);
3536 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts)
3538 struct rack_sendmap *rsm;
3541 segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
3542 rack->r_ctl.rc_pace_min_segs);
3543 rack->in_probe_rtt = 0;
3544 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
3545 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
3547 * Stop the goodput now; the idea here is
3548 * that future measurements taken with in_probe_rtt set
3549 * won't register if they are not greater, so
3550 * we want to get what info (if any) is available
3553 rack_do_goodput_measurement(rack->rc_tp, rack,
3554 rack->rc_tp->snd_una, __LINE__,
3555 RACK_QUALITY_PROBERTT);
3556 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
3558 * We don't have enough data to make a measurement.
3559 * So let's just stop and start here after exiting
3560 * probe-rtt. We probably are not interested in
3561 * the results anyway.
3563 rack->rc_tp->t_flags &= ~TF_GPUTINPROG;
3566 * Measurements through the current snd_max are going
3567 * to be limited by the slower pacing rate.
3569 * We need to mark these as app-limited so we
3570 * don't collapse the b/w.
3572 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
3573 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
3574 if (rack->r_ctl.rc_app_limited_cnt == 0)
3575 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
3578 * Go out to the end of the app-limited chain, mark
3579 * this new one as next, and move the end_appl up
3582 if (rack->r_ctl.rc_end_appl)
3583 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
3584 rack->r_ctl.rc_end_appl = rsm;
3586 rsm->r_flags |= RACK_APP_LIMITED;
3587 rack->r_ctl.rc_app_limited_cnt++;
3590 * Now, we need to examine our pacing rate multipliers.
3591 * If it's under 100%, we need to kick it back up to
3592 * 100%. We also don't let it be over our "max" above
3593 * the actual rate i.e. 100% + rack_clamp_atexit_prtt.
3594 * Note setting clamp_atexit_prtt to 0 has the effect
3595 * of setting CA/SS to 100% always at exit (which is
3596 * the default behavior).
3598 if (rack_probertt_clear_is) {
3599 rack->rc_gp_incr = 0;
3600 rack->rc_gp_bwred = 0;
3601 rack->rc_gp_timely_inc_cnt = 0;
3602 rack->rc_gp_timely_dec_cnt = 0;
3604 /* Do we do any clamping at exit? */
3605 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) {
3606 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp;
3607 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp;
3609 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) {
3610 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt;
3611 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt;
3614 * Let's set rtt_diff to 0, so that we will get a "boost"
3617 rack->r_ctl.rc_rtt_diff = 0;
3619 /* Clear all flags so we start fresh */
3620 rack->rc_tp->t_bytes_acked = 0;
3621 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND;
3623 * If configured to, set the cwnd and ssthresh to
3626 if (rack_probe_rtt_sets_cwnd) {
3630 /* Set ssthresh so we get into CA once we hit our target */
3631 if (rack_probertt_use_min_rtt_exit == 1) {
3632 /* Set to min rtt */
3633 rack_set_prtt_target(rack, segsiz,
3634 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
3635 } else if (rack_probertt_use_min_rtt_exit == 2) {
3636 /* Set to current gp rtt */
3637 rack_set_prtt_target(rack, segsiz,
3638 rack->r_ctl.rc_gp_srtt);
3639 } else if (rack_probertt_use_min_rtt_exit == 3) {
3640 /* Set to entry gp rtt */
3641 rack_set_prtt_target(rack, segsiz,
3642 rack->r_ctl.rc_entry_gp_rtt);
3647 sum = rack->r_ctl.rc_entry_gp_rtt;
3649 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt));
3652 * A highly buffered path needs
3653 * cwnd space for timely to work.
3654 * Let's set things up as if
3655 * we are heading back here again.
3657 setval = rack->r_ctl.rc_entry_gp_rtt;
3658 } else if (sum >= 15) {
3660 * Let's take the smaller of the
3661 * two since we are just somewhat
3664 setval = rack->r_ctl.rc_gp_srtt;
3665 if (setval > rack->r_ctl.rc_entry_gp_rtt)
3666 setval = rack->r_ctl.rc_entry_gp_rtt;
3669 * Here we are not highly buffered
3670 * and should pick the min we can to
3671 * keep from causing loss.
3673 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3675 rack_set_prtt_target(rack, segsiz,
3678 if (rack_probe_rtt_sets_cwnd > 1) {
3679 /* There is a percentage here to boost */
3680 ebdp = rack->r_ctl.rc_target_probertt_flight;
3681 ebdp *= rack_probe_rtt_sets_cwnd;
3683 setto = rack->r_ctl.rc_target_probertt_flight + ebdp;
3685 setto = rack->r_ctl.rc_target_probertt_flight;
3686 rack->rc_tp->snd_cwnd = roundup(setto, segsiz);
3687 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) {
3689 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs;
3691 /* If we set in the cwnd also set the ssthresh point so we are in CA */
3692 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1);
3694 rack_log_rtt_shrinks(rack, us_cts,
3695 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3696 __LINE__, RACK_RTTS_EXITPROBE);
3697 /* Clear times last so log has all the info */
3698 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max;
3699 rack->r_ctl.rc_time_probertt_entered = us_cts;
3700 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
3701 rack->r_ctl.rc_time_of_last_probertt = us_cts;
3705 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts)
3707 /* Check in on probe-rtt */
3708 if (rack->rc_gp_filled == 0) {
3709 /* We do not do p-rtt unless we have gp measurements */
3712 if (rack->in_probe_rtt) {
3713 uint64_t no_overflow;
3714 uint32_t endtime, must_stay;
3716 if (rack->r_ctl.rc_went_idle_time &&
3717 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) {
3719 * We went idle during prtt, just exit now.
3721 rack_exit_probertt(rack, us_cts);
3722 } else if (rack_probe_rtt_safety_val &&
3723 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) &&
3724 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) {
3726 * Probe RTT safety value triggered!
3728 rack_log_rtt_shrinks(rack, us_cts,
3729 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3730 __LINE__, RACK_RTTS_SAFETY);
3731 rack_exit_probertt(rack, us_cts);
3733 /* Calculate the max we will wait */
3734 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait);
3735 if (rack->rc_highly_buffered)
3736 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp);
3737 /* Calculate the min we must wait */
3738 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain);
3739 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) &&
3740 TSTMP_LT(us_cts, endtime)) {
3742 /* Do we lower more? */
3744 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered))
3745 calc = us_cts - rack->r_ctl.rc_time_probertt_entered;
3748 calc /= max(rack->r_ctl.rc_gp_srtt, 1);
3751 calc *= rack_per_of_gp_probertt_reduce;
3752 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc;
3754 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh)
3755 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh;
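			/*
			 * A rough worked example (the per-srtt reduce value is
			 * chosen only for illustration): starting from the
			 * default 70% probertt rate, with two gp_srtt's elapsed
			 * since entry and a per-srtt reduction of 10,
			 * calc = 2 * 10 = 20, so the rate drops to 50%,
			 * subject to the rack_per_of_gp_lowthresh floor above.
			 */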
3757 /* We must reach target or the time set */
3760 if (rack->r_ctl.rc_time_probertt_starts == 0) {
3761 if ((TSTMP_LT(us_cts, must_stay) &&
3762 rack->rc_highly_buffered) ||
3763 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) >
3764 rack->r_ctl.rc_target_probertt_flight)) {
3765 /* We are not past the must_stay time */
3768 rack_log_rtt_shrinks(rack, us_cts,
3769 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3770 __LINE__, RACK_RTTS_REACHTARGET);
3771 rack->r_ctl.rc_time_probertt_starts = us_cts;
3772 if (rack->r_ctl.rc_time_probertt_starts == 0)
3773 rack->r_ctl.rc_time_probertt_starts = 1;
3774 /* Restore back to our rate we want to pace at in prtt */
3775 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
3778 * Set up our end time: some number of gp_srtts plus 200ms.
3780 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt *
3781 (uint64_t)rack_probertt_gpsrtt_cnt_mul);
3782 if (rack_probertt_gpsrtt_cnt_div)
3783 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div);
3786 endtime += rack_min_probertt_hold;
3787 endtime += rack->r_ctl.rc_time_probertt_starts;
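		/*
		 * A rough worked example (the multiplier/divisor values are
		 * chosen only for illustration): with rc_gp_srtt = 30,000 us,
		 * a cnt_mul of 3 and a cnt_div of 2, the srtt portion is
		 * 45,000 us; adding the ~200ms minimum hold noted above gives
		 * an exit point roughly 245,000 us after
		 * rc_time_probertt_starts.
		 */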
3788 if (TSTMP_GEQ(us_cts, endtime)) {
3789 /* yes, exit probertt */
3790 rack_exit_probertt(rack, us_cts);
3793 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) {
3794 /* Go into probertt, it's been too long since we went lower */
3795 rack_enter_probertt(rack, us_cts);
3800 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est,
3801 uint32_t rtt, int32_t rtt_diff)
3803 uint64_t cur_bw, up_bnd, low_bnd, subfr;
3806 if ((rack->rc_gp_dyn_mul == 0) ||
3807 (rack->use_fixed_rate) ||
3808 (rack->in_probe_rtt) ||
3809 (rack->rc_always_pace == 0)) {
3810 /* No dynamic GP multiplier in play */
3813 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start;
3814 cur_bw = rack_get_bw(rack);
3815 /* Calculate our up and down range */
3816 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up;
3818 up_bnd += rack->r_ctl.last_gp_comp_bw;
3820 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down;
3822 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr;
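	/*
	 * A rough worked example (the multiplier values are chosen only for
	 * illustration): if last_gp_comp_bw was 1,000,000 bytes/sec and the
	 * up/down multipliers work out to allow 2% up and 4% down, the band
	 * is roughly [960,000, 1,020,000] bytes/sec.  A new estimate above
	 * the band means we keep increasing, below it means we are losing
	 * ground, and inside it we defer to the timely decision below.
	 */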
3823 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) {
3825 * This is the case where our RTT is above
3826 * the max target and we have been configured
3827 * to just do timely (no bonus up) stuff in that case.
3829 * There are two configurations: set to 1, and we
3830 * just do timely if we are over our max. If it's
3831 * set above 1 then we slam the multipliers down
3832 * to 100 and then decrement per timely.
3834 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
3836 if (rack->r_ctl.rc_no_push_at_mrtt > 1)
3837 rack_validate_multipliers_at_or_below_100(rack);
3838 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
3839 } else if ((last_bw_est < low_bnd) && !losses) {
3841 * We are decreasing. This is a bit complicated: it
3842 * means we are losing ground. This could be
3843 * because another flow entered and we are competing
3844 * for b/w with it. This will push the RTT up which
3845 * makes timely unusable unless we want to get shoved
3846 * into a corner and just be backed off (the age-
3847 * old problem with delay-based CC).
3849 * On the other hand if it was a route change we
3850 * would like to stay somewhat contained and not
3851 * blow out the buffers.
3853 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
3855 rack->r_ctl.last_gp_comp_bw = cur_bw;
3856 if (rack->rc_gp_bwred == 0) {
3857 /* Go into reduction counting */
3858 rack->rc_gp_bwred = 1;
3859 rack->rc_gp_timely_dec_cnt = 0;
3861 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) ||
3862 (timely_says == 0)) {
3864 * Push another time with a faster pacing
3865 * to try to gain back (we include override to
3866 * get a full raise factor).
3868 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) ||
3869 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) ||
3870 (timely_says == 0) ||
3871 (rack_down_raise_thresh == 0)) {
3873 * Do an override up in b/w if we were
3874 * below the threshold, or if the threshold
3875 * is zero (we always do the raise then).
3877 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1);
3879 /* Log it stays the same */
3880 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0,
3883 rack->rc_gp_timely_dec_cnt++;
3884 /* We are not incrementing really no-count */
3885 rack->rc_gp_incr = 0;
3886 rack->rc_gp_timely_inc_cnt = 0;
3889 * Let's just use the RTT
3890 * information and give up
3895 } else if ((timely_says != 2) &&
3897 (last_bw_est > up_bnd)) {
3899 * We are increasing b/w; let's keep going, updating
3900 * our b/w and ignoring any timely input, unless
3901 * of course we are at our max raise (if there is one).
3904 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
3906 rack->r_ctl.last_gp_comp_bw = cur_bw;
3907 if (rack->rc_gp_saw_ss &&
3908 rack_per_upper_bound_ss &&
3909 (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) {
3911 * In cases where we can't go higher
3912 * we should just use timely.
3916 if (rack->rc_gp_saw_ca &&
3917 rack_per_upper_bound_ca &&
3918 (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) {
3920 * In cases where we can't go higher
3921 * we should just use timely.
3925 rack->rc_gp_bwred = 0;
3926 rack->rc_gp_timely_dec_cnt = 0;
3927 /* You get a set number of pushes if timely is trying to reduce */
3928 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) {
3929 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
3931 /* Log it stays the same */
3932 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0,
3938 * We are staying between the lower and upper range bounds
3939 * so use timely to decide.
3941 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
3945 rack->rc_gp_incr = 0;
3946 rack->rc_gp_timely_inc_cnt = 0;
3947 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) &&
3949 (last_bw_est < low_bnd)) {
3950 /* We are losing ground */
3951 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
3952 rack->rc_gp_timely_dec_cnt++;
3953 /* We are not incrementing really no-count */
3954 rack->rc_gp_incr = 0;
3955 rack->rc_gp_timely_inc_cnt = 0;
3957 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
3959 rack->rc_gp_bwred = 0;
3960 rack->rc_gp_timely_dec_cnt = 0;
3961 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
3967 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt)
3969 int32_t timely_says;
3970 uint64_t log_mult, log_rtt_a_diff;
3972 log_rtt_a_diff = rtt;
3973 log_rtt_a_diff <<= 32;
3974 log_rtt_a_diff |= (uint32_t)rtt_diff;
3975 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) *
3976 rack_gp_rtt_maxmul)) {
3977 /* Reduce the b/w multiplier */
3979 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
3981 log_mult |= prev_rtt;
3982 rack_log_timely(rack, timely_says, log_mult,
3983 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3984 log_rtt_a_diff, __LINE__, 4);
3985 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
3986 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
3987 max(rack_gp_rtt_mindiv , 1)))) {
3988 /* Increase the b/w multiplier */
3989 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
3990 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
3991 max(rack_gp_rtt_mindiv , 1));
3993 log_mult |= prev_rtt;
3995 rack_log_timely(rack, timely_says, log_mult ,
3996 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3997 log_rtt_a_diff, __LINE__, 5);
4000 * Use a gradient to find the timely gradient
4002 * grad = rc_rtt_diff / min_rtt;
4004 * Anything below or equal to 0 will be
4005 * an increase indication. Anything above
4006 * zero is a decrease. Note we take care
4007 * of the actual gradient calculation
4008 * in the reduction (it's not needed for
4011 log_mult = prev_rtt;
4012 if (rtt_diff <= 0) {
4014 * Rttdiff is less than zero, increase the
4015 * b/w multiplier (it's 0 or negative)
4018 rack_log_timely(rack, timely_says, log_mult,
4019 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6);
4021 /* Reduce the b/w multiplier */
4023 rack_log_timely(rack, timely_says, log_mult,
4024 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7);
4027 return (timely_says);
4031 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
4032 tcp_seq th_ack, int line, uint8_t quality)
4034 uint64_t tim, bytes_ps, ltim, stim, utim;
4035 uint32_t segsiz, bytes, reqbytes, us_cts;
4036 int32_t gput, new_rtt_diff, timely_says;
4037 uint64_t resid_bw, subpart = 0, addpart = 0, srtt;
4040 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
4041 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
4042 if (TSTMP_GEQ(us_cts, tp->gput_ts))
4043 tim = us_cts - tp->gput_ts;
4046 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts)
4047 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts;
4051 * Use the larger of the send time or ack time. This prevents us
4052 * from being influenced by ack artifacts to come up with too
4053 * high of a measurement. Note that since we are spanning over many more
4054 * bytes in most of our measurements hopefully that is less likely to
4060 utim = max(stim, 1);
4061 /* Let's get a msec time ltim too for the old stuff */
4062 ltim = max(1, (utim / HPTS_USEC_IN_MSEC));
4063 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim;
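	/*
	 * A rough worked example: 125,000 bytes newly acked over an ltim of
	 * 100 msec gives gput = (125,000 << 3) / 100 = 10,000, i.e. roughly
	 * 10,000 kbit/sec for the older msec-based stats path.
	 */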
4064 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz));
4065 if ((tim == 0) && (stim == 0)) {
4067 * Invalid measurement time, maybe
4068 * all on one ack/one send?
4072 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4073 0, 0, 0, 10, __LINE__, NULL, quality);
4074 goto skip_measurement;
4076 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) {
4077 /* We never made a us_rtt measurement? */
4080 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4081 0, 0, 0, 10, __LINE__, NULL, quality);
4082 goto skip_measurement;
4085 * Calculate the maximum possible b/w this connection
4086 * could have. We base our calculation on the lowest
4087 * rtt we have seen during the measurement and the
4088 * largest rwnd the client has given us in that time. This
4089 * forms a BDP that is the maximum that we could ever
4090 * get to the client. Anything larger is not valid.
4092 * I originally had code here that rejected measurements
4093 * where the time was less than 1/2 the latest us_rtt.
4094 * But after thinking on that I realized it's wrong since,
4095 * say, you had a 150Mbps or even 1Gbps link, and you
4096 * were a long way away.. example I am in Europe (100ms rtt)
4097 * talking to my 1Gbps link in S.C. Now measuring say 150,000
4098 * bytes my time would be 1.2ms, and yet my rtt would say
4099 * the measurement was invalid since the time was < 50ms. The
4100 * same thing is true for 150Mb (8ms of time).
4102 * A better way I realized is to look at what the maximum
4103 * the connection could possibly do. This is gated on
4104 * the lowest RTT we have seen and the highest rwnd.
4105 * We should in theory never exceed that; if we are,
4106 * then something on the path is storing up packets
4107 * and then feeding them all at once to our endpoint,
4108 * messing up our measurement.
4110 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd;
4111 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC;
4112 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt;
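	/*
	 * A rough worked example (values chosen only for illustration): a
	 * highest rwnd of 1,000,000 bytes and a lowest rtt of 50,000 us
	 * bound last_max_bw at 1,000,000 * 1,000,000 / 50,000 = 20,000,000
	 * bytes/sec; any measurement above that gets capped below.
	 */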
4113 if (SEQ_LT(th_ack, tp->gput_seq)) {
4114 /* No measurement can be made */
4117 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4118 0, 0, 0, 10, __LINE__, NULL, quality);
4119 goto skip_measurement;
4121 bytes = (th_ack - tp->gput_seq);
4122 bytes_ps = (uint64_t)bytes;
4124 * Don't measure a b/w for pacing unless we have gotten at least
4125 * an initial window's worth of data in this measurement interval.
4127 * Small numbers of bytes get badly influenced by delayed ack and
4128 * other artifacts. Note we take the initial window or our
4129 * defined minimum GP (defaulting to 10 which hopefully is the
4132 if (rack->rc_gp_filled == 0) {
4134 * The initial estimate is special. We
4135 * have blasted out an IW worth of packets
4136 * without a real valid ack ts result. We
4137 * then set up the app_limited_needs_set flag;
4138 * this should get the first ack in (probably 2
4139 * MSS worth) to be recorded as the timestamp.
4140 * We thus allow a smaller number of bytes i.e.
4143 reqbytes -= (2 * segsiz);
4144 /* Also let's fill previous for our first measurement to be neutral */
4145 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
4147 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) {
4148 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4149 rack->r_ctl.rc_app_limited_cnt,
4150 0, 0, 10, __LINE__, NULL, quality);
4151 goto skip_measurement;
4154 * We now need to calculate the Timely-like status so
4155 * we can update (possibly) the b/w multipliers.
4157 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt;
4158 if (rack->rc_gp_filled == 0) {
4159 /* No previous reading */
4160 rack->r_ctl.rc_rtt_diff = new_rtt_diff;
4162 if (rack->measure_saw_probe_rtt == 0) {
4164 * We don't want a probertt to be counted
4165 * since it will incorrectly be negative. We
4166 * expect to be reducing the RTT when we
4167 * pace at a slower rate.
4169 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8);
4170 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8);
4173 timely_says = rack_make_timely_judgement(rack,
4174 rack->r_ctl.rc_gp_srtt,
4175 rack->r_ctl.rc_rtt_diff,
4176 rack->r_ctl.rc_prev_gp_srtt
4178 bytes_ps *= HPTS_USEC_IN_SEC;
4180 if (bytes_ps > rack->r_ctl.last_max_bw) {
4182 * Something on the path is playing games,
4183 * since this b/w is not possible based
4184 * on our BDP (highest rwnd and lowest rtt
4185 * we saw in the measurement window).
4187 * Another option here would be to
4188 * instead skip the measurement.
4190 rack_log_pacing_delay_calc(rack, bytes, reqbytes,
4191 bytes_ps, rack->r_ctl.last_max_bw, 0,
4192 11, __LINE__, NULL, quality);
4193 bytes_ps = rack->r_ctl.last_max_bw;
4195 /* We store gp for b/w in bytes per second */
4196 if (rack->rc_gp_filled == 0) {
4197 /* Initial measurement */
4199 rack->r_ctl.gp_bw = bytes_ps;
4200 rack->rc_gp_filled = 1;
4201 rack->r_ctl.num_measurements = 1;
4202 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
4204 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4205 rack->r_ctl.rc_app_limited_cnt,
4206 0, 0, 10, __LINE__, NULL, quality);
4208 if (tcp_in_hpts(rack->rc_inp) &&
4209 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
4211 * Ok we can't trust the pacer in this case
4212 * where we transition from un-paced to paced.
4213 * Or for that matter when the burst mitigation
4214 * was making a wild guess and got it wrong.
4215 * Stop the pacer and clear up all the aggregate
4218 tcp_hpts_remove(rack->rc_inp);
4219 rack->r_ctl.rc_hpts_flags = 0;
4220 rack->r_ctl.rc_last_output_to = 0;
4223 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) {
4224 /* Still a small number, run an average */
4225 rack->r_ctl.gp_bw += bytes_ps;
4226 addpart = rack->r_ctl.num_measurements;
4227 rack->r_ctl.num_measurements++;
4228 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
4229 /* We have collected enough to move forward */
4230 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements;
4235 * We want to take 1/wma of the goodput and add in to 7/8th
4236 * of the old value weighted by the srtt. So if your measurement
4237 * period is say 2 SRTT's long you would get 1/4 as the
4238 * value, if it was like 1/2 SRTT then you would get 1/16th.
4240 * But we must be careful not to take too much i.e. if the
4241 * srtt is say 20ms and the measurement is taken over
4242 * 400ms our weight would be 400/20 i.e. 20. On the
4243 * other hand if we get a measurement over 1ms with a
4244 * 10ms rtt we only want to take a much smaller portion.
4246 if (rack->r_ctl.num_measurements < 0xff) {
4247 rack->r_ctl.num_measurements++;
4249 srtt = (uint64_t)tp->t_srtt;
4252 * Strange why did t_srtt go back to zero?
4254 if (rack->r_ctl.rc_rack_min_rtt)
4255 srtt = rack->r_ctl.rc_rack_min_rtt;
4257 srtt = HPTS_USEC_IN_MSEC;
4260 * XXXrrs: Note for reviewers, in playing with
4261 * dynamic pacing I discovered this GP calculation
4262 * as done originally leads to some undesired results.
4263 * Basically you can get longer measurements contributing
4264 * too much to the WMA. Thus I changed it if you are doing
4265 * dynamic adjustments to only do the apportioned adjustment
4266 * if we have a very small (time wise) measurement. Longer
4267 * measurements just get their weight (defaulting to 1/8)
4268 * added to the WMA. We may want to think about changing
4269 * this to always do that for both sides i.e. dynamic
4270 * and non-dynamic... but considering lots of folks
4271 * were playing with this I did not want to change the
4272 * calculation per se without your thoughts.. Lawerence?
4275 if (rack->rc_gp_dyn_mul == 0) {
4276 subpart = rack->r_ctl.gp_bw * utim;
4277 subpart /= (srtt * 8);
4278 if (subpart < (rack->r_ctl.gp_bw / 2)) {
4280 * The b/w update takes no more
4281 * away than 1/2 our running total
4284 addpart = bytes_ps * utim;
4285 addpart /= (srtt * 8);
4288 * Don't allow a single measurement
4289 * to account for more than 1/2 of the
4290 * WMA. This could happen on a retransmission
4291 * where utim becomes huge compared to
4292 * srtt (multiple retransmissions when using
4293 * the sending rate which factors in all the
4294 * transmissions from the first one).
4296 subpart = rack->r_ctl.gp_bw / 2;
4297 addpart = bytes_ps / 2;
4299 resid_bw = rack->r_ctl.gp_bw - subpart;
4300 rack->r_ctl.gp_bw = resid_bw + addpart;
4303 if ((utim / srtt) <= 1) {
4305 * The b/w update was over a small period
4306 * of time. The idea here is to prevent a small
4307 * measurement time period from counting
4308 * too much. So we scale it based on the
4309 * time so it attributes less than 1/rack_wma_divisor
4310 * of its measurement.
4312 subpart = rack->r_ctl.gp_bw * utim;
4313 subpart /= (srtt * rack_wma_divisor);
4314 addpart = bytes_ps * utim;
4315 addpart /= (srtt * rack_wma_divisor);
4318 * The scaled measurement was long
4319 * enough so let's just add in the
4320 * portion of the measurement i.e. 1/rack_wma_divisor
4322 subpart = rack->r_ctl.gp_bw / rack_wma_divisor;
4323 addpart = bytes_ps / rack_wma_divisor;
4325 if ((rack->measure_saw_probe_rtt == 0) ||
4326 (bytes_ps > rack->r_ctl.gp_bw)) {
4328 * For probe-rtt we only add it in
4329 * if it's larger; all others we just
4333 resid_bw = rack->r_ctl.gp_bw - subpart;
4334 rack->r_ctl.gp_bw = resid_bw + addpart;
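			/*
			 * A rough worked example of the WMA step (values
			 * chosen only for illustration, long-measurement case
			 * with the 1/8 weight noted above): old gp_bw of
			 * 800,000 and a new bytes_ps of 1,600,000 give
			 * subpart = 100,000 and addpart = 200,000, so gp_bw
			 * becomes 700,000 + 200,000 = 900,000 bytes/sec.
			 */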
4338 if ((rack->gp_ready == 0) &&
4339 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
4340 /* We have enough measurements now */
4342 rack_set_cc_pacing(rack);
4343 if (rack->defer_options)
4344 rack_apply_deferred_options(rack);
4346 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim,
4347 rack_get_bw(rack), 22, did_add, NULL, quality);
4348 /* We do not update any multipliers if we are in or have seen a probe-rtt */
4349 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set)
4350 rack_update_multiplier(rack, timely_says, bytes_ps,
4351 rack->r_ctl.rc_gp_srtt,
4352 rack->r_ctl.rc_rtt_diff);
4353 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim,
4354 rack_get_bw(rack), 3, line, NULL, quality);
4355 /* reset the gp srtt and setup the new prev */
4356 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
4357 /* Record the lost count for the next measurement */
4358 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count;
4360 * We restart our diffs based on the gpsrtt in the
4361 * measurement window.
4363 rack->rc_gp_rtt_set = 0;
4364 rack->rc_gp_saw_rec = 0;
4365 rack->rc_gp_saw_ca = 0;
4366 rack->rc_gp_saw_ss = 0;
4367 rack->rc_dragged_bottom = 0;
4371 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
4374 * XXXLAS: This is a temporary hack, and should be
4375 * chained off VOI_TCP_GPUT when stats(9) grows an
4376 * API to deal with chained VOIs.
4378 if (tp->t_stats_gput_prev > 0)
4379 stats_voi_update_abs_s32(tp->t_stats,
4381 ((gput - tp->t_stats_gput_prev) * 100) /
4382 tp->t_stats_gput_prev);
4384 tp->t_flags &= ~TF_GPUTINPROG;
4385 tp->t_stats_gput_prev = gput;
4387 * Now, are we app limited and is there space from where we
4388 * were to where we want to go?
4390 * We don't do the other case i.e. non-applimited here since
4391 * the next send will trigger us picking up the missing data.
4393 if (rack->r_ctl.rc_first_appl &&
4394 TCPS_HAVEESTABLISHED(tp->t_state) &&
4395 rack->r_ctl.rc_app_limited_cnt &&
4396 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) &&
4397 ((rack->r_ctl.rc_first_appl->r_end - th_ack) >
4398 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
4400 * Yep there is enough outstanding to make a measurement here.
4402 struct rack_sendmap *rsm, fe;
4404 rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
4405 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
4406 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
4407 rack->app_limited_needs_set = 0;
4408 tp->gput_seq = th_ack;
4409 if (rack->in_probe_rtt)
4410 rack->measure_saw_probe_rtt = 1;
4411 else if ((rack->measure_saw_probe_rtt) &&
4412 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
4413 rack->measure_saw_probe_rtt = 0;
4414 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) {
4415 /* There is a full window to gain info from */
4416 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
4418 /* We can only measure up to the applimited point */
4419 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack);
4420 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
4422 * We don't have enough to make a measurement.
4424 tp->t_flags &= ~TF_GPUTINPROG;
4425 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq,
4426 0, 0, 0, 6, __LINE__, NULL, quality);
4430 if (tp->t_state >= TCPS_FIN_WAIT_1) {
4432 * We will get no more data into the SB;
4433 * this means we need to have the data available
4434 * before we start a measurement.
4436 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) {
4437 /* Nope not enough data. */
4441 tp->t_flags |= TF_GPUTINPROG;
4443 * Now we need to find the timestamp of the send at tp->gput_seq
4444 * for the send based measurement.
4446 fe.r_start = tp->gput_seq;
4447 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
4449 /* Ok send-based limit is set */
4450 if (SEQ_LT(rsm->r_start, tp->gput_seq)) {
4452 * Move back to include the earlier part
4453 * so our ack time lines up right (this may
4454 * make an overlapping measurement but that's
4457 tp->gput_seq = rsm->r_start;
4459 if (rsm->r_flags & RACK_ACKED)
4460 tp->gput_ts = (uint32_t)rsm->r_ack_arrival;
4462 rack->app_limited_needs_set = 1;
4463 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
4466 * If we don't find the rsm due to some
4467 * send-limit, set the current time, which
4468 * basically disables the send-limit.
4473 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
4475 rack_log_pacing_delay_calc(rack,
4480 rack->r_ctl.rc_app_limited_cnt,
4482 __LINE__, NULL, quality);
4487 * CC wrapper hook functions
4490 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs,
4491 uint16_t type, int32_t recovery)
4493 uint32_t prior_cwnd, acked;
4494 struct tcp_log_buffer *lgb = NULL;
4495 uint8_t labc_to_use, quality;
4497 INP_WLOCK_ASSERT(tptoinpcb(tp));
4498 tp->t_ccv.nsegs = nsegs;
4499 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una);
4500 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) {
4503 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp);
4504 if (tp->t_ccv.bytes_this_ack > max) {
4505 tp->t_ccv.bytes_this_ack = max;
4509 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
4510 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd);
4512 quality = RACK_QUALITY_NONE;
4513 if ((tp->t_flags & TF_GPUTINPROG) &&
4514 rack_enough_for_measurement(tp, rack, th_ack, &quality)) {
4515 /* Measure the Goodput */
4516 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality);
4517 #ifdef NETFLIX_PEAKRATE
4518 if ((type == CC_ACK) &&
4519 (tp->t_maxpeakrate)) {
4521 * We update t_peakrate_thr. This gives us roughly
4522 * one update per round trip time. Note
4523 * it will only be used if pace_always is off, i.e.
4524 * we don't do this for paced flows.
4526 rack_update_peakrate_thr(tp);
4530 /* Which way are we limited; if not cwnd limited, no advance in CA */
4531 if (tp->snd_cwnd <= tp->snd_wnd)
4532 tp->t_ccv.flags |= CCF_CWND_LIMITED;
4534 tp->t_ccv.flags &= ~CCF_CWND_LIMITED;
4535 if (tp->snd_cwnd > tp->snd_ssthresh) {
4536 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack,
4537 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp));
4538 /* For the setting of a window past use the actual scwnd we are using */
4539 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) {
4540 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use;
4541 tp->t_ccv.flags |= CCF_ABC_SENTAWND;
4544 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND;
4545 tp->t_bytes_acked = 0;
4547 prior_cwnd = tp->snd_cwnd;
4548 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec ||
4549 (rack_client_low_buf && (rack->client_bufferlvl < rack_client_low_buf)))
4550 labc_to_use = rack->rc_labc;
4552 labc_to_use = rack_max_abc_post_recovery;
4553 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
4554 union tcp_log_stackspecific log;
4557 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
4558 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
4559 log.u_bbr.flex1 = th_ack;
4560 log.u_bbr.flex2 = tp->t_ccv.flags;
4561 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack;
4562 log.u_bbr.flex4 = tp->t_ccv.nsegs;
4563 log.u_bbr.flex5 = labc_to_use;
4564 log.u_bbr.flex6 = prior_cwnd;
4565 log.u_bbr.flex7 = V_tcp_do_newsack;
4566 log.u_bbr.flex8 = 1;
4567 lgb = tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
4568 0, &log, false, NULL, NULL, 0, &tv);
4570 if (CC_ALGO(tp)->ack_received != NULL) {
4571 /* XXXLAS: Find a way to live without this */
4572 tp->t_ccv.curack = th_ack;
4573 tp->t_ccv.labc = labc_to_use;
4574 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC;
4575 CC_ALGO(tp)->ack_received(&tp->t_ccv, type);
4578 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd;
4580 if (rack->r_must_retran) {
4581 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) {
4583 * We now are beyond the rxt point so let's disable
4586 rack->r_ctl.rc_out_at_rto = 0;
4587 rack->r_must_retran = 0;
4588 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) {
4590 * Only decrement the rc_out_at_rto if the cwnd advances
4591 * at least a whole segment. Otherwise next time the peer
4592 * acks, we won't be able to send. This generally happens
4593 * when we are in Congestion Avoidance.
4595 if (acked <= rack->r_ctl.rc_out_at_rto){
4596 rack->r_ctl.rc_out_at_rto -= acked;
4598 rack->r_ctl.rc_out_at_rto = 0;
4603 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use);
4605 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) {
4606 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use;
4608 #ifdef NETFLIX_PEAKRATE
4609 /* we enforce max peak rate if it is set and we are not pacing */
4610 if ((rack->rc_always_pace == 0) &&
4611 tp->t_peakrate_thr &&
4612 (tp->snd_cwnd > tp->t_peakrate_thr)) {
4613 tp->snd_cwnd = tp->t_peakrate_thr;
4619 tcp_rack_partialack(struct tcpcb *tp)
4621 struct tcp_rack *rack;
4623 rack = (struct tcp_rack *)tp->t_fb_ptr;
4624 INP_WLOCK_ASSERT(tptoinpcb(tp));
4626 * If we are doing PRR and have enough
4627 * room to send <or> we are pacing and prr
4628 * is disabled we will want to see if we
4629 * can send data (by setting r_wanted_output to
4632 if ((rack->r_ctl.rc_prr_sndcnt > 0) ||
4634 rack->r_wanted_output = 1;
4638 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack)
4640 struct tcp_rack *rack;
4643 orig_cwnd = tp->snd_cwnd;
4644 INP_WLOCK_ASSERT(tptoinpcb(tp));
4645 rack = (struct tcp_rack *)tp->t_fb_ptr;
4646 /* only alert CC if we alerted when we entered */
4647 if (CC_ALGO(tp)->post_recovery != NULL) {
4648 tp->t_ccv.curack = th_ack;
4649 CC_ALGO(tp)->post_recovery(&tp->t_ccv);
4650 if (tp->snd_cwnd < tp->snd_ssthresh) {
4652 * Rack has burst control and pacing
4653 * so let's not set this any lower than
4654 * snd_ssthresh per RFC-6582 (option 2).
4656 tp->snd_cwnd = tp->snd_ssthresh;
4659 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
4660 union tcp_log_stackspecific log;
4663 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
4664 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
4665 log.u_bbr.flex1 = th_ack;
4666 log.u_bbr.flex2 = tp->t_ccv.flags;
4667 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack;
4668 log.u_bbr.flex4 = tp->t_ccv.nsegs;
4669 log.u_bbr.flex5 = V_tcp_abc_l_var;
4670 log.u_bbr.flex6 = orig_cwnd;
4671 log.u_bbr.flex7 = V_tcp_do_newsack;
4672 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
4673 log.u_bbr.flex8 = 2;
4674 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
4675 0, &log, false, NULL, NULL, 0, &tv);
4677 if ((rack->rack_no_prr == 0) &&
4678 (rack->no_prr_addback == 0) &&
4679 (rack->r_ctl.rc_prr_sndcnt > 0)) {
4681 * Suck the next prr cnt back into cwnd, but
4682 * only do that if we are not application limited.
4684 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) {
4686 * We are allowed to add back to the cwnd the amount we did
4688 * a) no_prr_addback is off.
4689 * b) we are not app limited
4690 * c) we are doing prr
4692 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none).
4694 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax),
4695 rack->r_ctl.rc_prr_sndcnt);
4697 rack->r_ctl.rc_prr_sndcnt = 0;
4698 rack_log_to_prr(rack, 1, 0, __LINE__);
4700 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__);
4701 tp->snd_recover = tp->snd_una;
4702 if (rack->r_ctl.dsack_persist) {
4703 rack->r_ctl.dsack_persist--;
4704 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
4705 rack->r_ctl.num_dsack = 0;
4707 rack_log_dsack_event(rack, 1, __LINE__, 0, 0);
4709 EXIT_RECOVERY(tp->t_flags);
4713 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line)
4715 struct tcp_rack *rack;
4716 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd;
4718 INP_WLOCK_ASSERT(tptoinpcb(tp));
4720 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
4722 if (IN_RECOVERY(tp->t_flags) == 0) {
4723 in_rec_at_entry = 0;
4724 ssthresh_enter = tp->snd_ssthresh;
4725 cwnd_enter = tp->snd_cwnd;
4727 in_rec_at_entry = 1;
4728 rack = (struct tcp_rack *)tp->t_fb_ptr;
4731 tp->t_flags &= ~TF_WASFRECOVERY;
4732 tp->t_flags &= ~TF_WASCRECOVERY;
4733 if (!IN_FASTRECOVERY(tp->t_flags)) {
4734 rack->r_ctl.rc_prr_delivered = 0;
4735 rack->r_ctl.rc_prr_out = 0;
4736 if (rack->rack_no_prr == 0) {
4737 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
4738 rack_log_to_prr(rack, 2, in_rec_at_entry, line);
4740 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una;
4741 tp->snd_recover = tp->snd_max;
4742 if (tp->t_flags2 & TF2_ECN_PERMIT)
4743 tp->t_flags2 |= TF2_ECN_SND_CWR;
4747 if (!IN_CONGRECOVERY(tp->t_flags) ||
4749 * Allow ECN reaction on ACK to CWR, if
4750 * that data segment was also CE marked.
4752 SEQ_GEQ(ack, tp->snd_recover)) {
4753 EXIT_CONGRECOVERY(tp->t_flags);
4754 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd);
4755 tp->snd_recover = tp->snd_max + 1;
4756 if (tp->t_flags2 & TF2_ECN_PERMIT)
4757 tp->t_flags2 |= TF2_ECN_SND_CWR;
4762 tp->t_bytes_acked = 0;
4763 EXIT_RECOVERY(tp->t_flags);
4764 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 /
4765 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp);
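		/*
		 * A rough worked example (values chosen only for
		 * illustration): with min(snd_wnd, cwnd_to_use) = 100,000
		 * bytes and a 1,448-byte maxseg, 100,000 / 2 / 1,448 = 34
		 * segments, so ssthresh becomes 34 * 1,448 = 49,232 bytes
		 * (never fewer than 2 segments).
		 */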
4766 orig_cwnd = tp->snd_cwnd;
4767 tp->snd_cwnd = ctf_fixed_maxseg(tp);
4768 rack_log_to_prr(rack, 16, orig_cwnd, line);
4769 if (tp->t_flags2 & TF2_ECN_PERMIT)
4770 tp->t_flags2 |= TF2_ECN_SND_CWR;
4773 KMOD_TCPSTAT_INC(tcps_sndrexmitbad);
4774 /* RTO was unnecessary, so reset everything. */
4775 tp->snd_cwnd = tp->snd_cwnd_prev;
4776 tp->snd_ssthresh = tp->snd_ssthresh_prev;
4777 tp->snd_recover = tp->snd_recover_prev;
4778 if (tp->t_flags & TF_WASFRECOVERY) {
4779 ENTER_FASTRECOVERY(tp->t_flags);
4780 tp->t_flags &= ~TF_WASFRECOVERY;
4782 if (tp->t_flags & TF_WASCRECOVERY) {
4783 ENTER_CONGRECOVERY(tp->t_flags);
4784 tp->t_flags &= ~TF_WASCRECOVERY;
4786 tp->snd_nxt = tp->snd_max;
4787 tp->t_badrxtwin = 0;
4790 if ((CC_ALGO(tp)->cong_signal != NULL) &&
4792 tp->t_ccv.curack = ack;
4793 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type);
4795 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) {
4796 rack_log_to_prr(rack, 15, cwnd_enter, line);
4797 rack->r_ctl.dsack_byte_cnt = 0;
4798 rack->r_ctl.retran_during_recovery = 0;
4799 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter;
4800 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter;
4801 rack->r_ent_rec_ns = 1;
4806 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp)
4810 INP_WLOCK_ASSERT(tptoinpcb(tp));
4812 #ifdef NETFLIX_STATS
4813 KMOD_TCPSTAT_INC(tcps_idle_restarts);
4814 if (tp->t_state == TCPS_ESTABLISHED)
4815 KMOD_TCPSTAT_INC(tcps_idle_estrestarts);
4817 if (CC_ALGO(tp)->after_idle != NULL)
4818 CC_ALGO(tp)->after_idle(&tp->t_ccv);
4820 if (tp->snd_cwnd == 1)
4821 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */
4823 i_cwnd = rc_init_window(rack);
4826 * Being idle is no different than the initial window. If the cc
4827 * clamps it down below the initial window, raise it to the initial
4830 if (tp->snd_cwnd < i_cwnd) {
4831 tp->snd_cwnd = i_cwnd;
4836 * Indicate whether this ack should be delayed. We can delay the ack if
4837 * following conditions are met:
4838 * - There is no delayed ack timer in progress.
4839 * - Our last ack wasn't a 0-sized window. We never want to delay
4840 * the ack that opens up a 0-sized window.
4841 * - LRO wasn't used for this segment. We make sure by checking that the
4842 * segment size is not larger than the MSS.
4843 * - Delayed acks are enabled or this is a half-synchronized T/TCP
4846 #define DELAY_ACK(tp, tlen) \
4847 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \
4848 ((tp->t_flags & TF_DELACK) == 0) && \
4849 (tlen <= tp->t_maxseg) && \
4850 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
4852 static struct rack_sendmap *
4853 rack_find_lowest_rsm(struct tcp_rack *rack)
4855 struct rack_sendmap *rsm;
4858 * Walk the time-order transmitted list looking for an rsm that is
4859 * not acked. This will be the one that was sent the longest time
4860 * ago that is still outstanding.
4862 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
4863 if (rsm->r_flags & RACK_ACKED) {
4872 static struct rack_sendmap *
4873 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
4875 struct rack_sendmap *prsm;
4878 * Walk the sequence order list backward until we arrive at
4879 * the highest seq not acked. In theory when this is called it
4880 * should be the last segment (which it was not).
4883 RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) {
4884 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
4893 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
4899 * lro is the flag we use to determine if we have seen reordering.
4900 * If it gets set we have seen reordering. The reorder logic
4901 * works in one of two ways:
4903 * If reorder-fade is configured, then we track the last time we saw
4904 * re-ordering occur. If we reach the point where enough time has
4905 * passed, we no longer consider reordering as occurring.
4907 * Or if reorder-fade is 0, then once we see reordering we consider
4908 * the connection to always be subject to reordering and just set lro
4911 * In the end if lro is non-zero we add the extra time for
4916 if (rack->r_ctl.rc_reorder_ts) {
4917 if (rack->r_ctl.rc_reorder_fade) {
4918 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
4919 lro = cts - rack->r_ctl.rc_reorder_ts;
4922 * No time has passed since the last
4923 * reorder; mark it as reordering.
4928 /* Negative time? */
4931 if (lro > rack->r_ctl.rc_reorder_fade) {
4932 /* Turn off reordering seen too */
4933 rack->r_ctl.rc_reorder_ts = 0;
4937 /* Reordering does not fade */
4943 if (rack->rc_rack_tmr_std_based == 0) {
4944 thresh = srtt + rack->r_ctl.rc_pkt_delay;
4946 /* Standards based pkt-delay is 1/4 srtt */
4947 thresh = srtt + (srtt >> 2);
4949 if (lro && (rack->rc_rack_tmr_std_based == 0)) {
4950 /* It must be set, if not you get 1/4 rtt */
4951 if (rack->r_ctl.rc_reorder_shift)
4952 thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
4954 thresh += (srtt >> 2);
4956 if (rack->rc_rack_use_dsack &&
4958 (rack->r_ctl.num_dsack > 0)) {
4960 * We only increase the reordering window if we
4961 * have seen reordering <and> we have a DSACK count.
4963 thresh += rack->r_ctl.num_dsack * (srtt >> 2);
4964 rack_log_dsack_event(rack, 4, __LINE__, srtt, thresh);
4966 /* SRTT * 2 is the ceiling */
4967 if (thresh > (srtt * 2)) {
4970 /* And we don't want it above the RTO max either */
4971 if (thresh > rack_rto_max) {
4972 thresh = rack_rto_max;
4974 rack_log_dsack_event(rack, 6, __LINE__, srtt, thresh);
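/*
 * A rough worked example of the rack_calc_thresh_rack() computation above
 * (rc_pkt_delay and the reorder shift are values chosen only for
 * illustration): with srtt = 40,000 us and rc_pkt_delay = 1,000 us, thresh
 * starts at 41,000 us; if reordering has been seen and rc_reorder_shift is
 * 2, another srtt >> 2 = 10,000 us is added for 51,000 us, which is then
 * clamped to at most 2 * srtt and rack_rto_max.
 */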
4979 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
4980 struct rack_sendmap *rsm, uint32_t srtt)
4982 struct rack_sendmap *prsm;
4983 uint32_t thresh, len;
4988 if (rack->r_ctl.rc_tlp_threshold)
4989 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold);
4991 thresh = (srtt * 2);
4993 /* Get the previously sent packet, if any */
4994 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
4995 len = rsm->r_end - rsm->r_start;
4996 if (rack->rack_tlp_threshold_use == TLP_USE_ID) {
4997 /* Exactly like the ID */
4998 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) {
4999 uint32_t alt_thresh;
5001 * Compensate for delayed-ack with the d-ack time.
5003 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
5004 if (alt_thresh > thresh)
5005 thresh = alt_thresh;
5007 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) {
5009 prsm = TAILQ_PREV(rsm, rack_head, r_tnext);
5010 if (prsm && (len <= segsiz)) {
5012 * Two packets outstanding, thresh should be (2*srtt) +
5013 * possible inter-packet delay (if any).
5015 uint32_t inter_gap = 0;
5018 idx = rsm->r_rtr_cnt - 1;
5019 nidx = prsm->r_rtr_cnt - 1;
5020 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) {
5021 /* Yes it was sent later (or at the same time) */
5022 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx];
5024 thresh += inter_gap;
5025 } else if (len <= segsiz) {
5027 * Possibly compensate for delayed-ack.
5029 uint32_t alt_thresh;
5031 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
5032 if (alt_thresh > thresh)
5033 thresh = alt_thresh;
5035 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) {
5037 if (len <= segsiz) {
5038 uint32_t alt_thresh;
5040 * Compensate for delayed-ack with the d-ack time.
5042 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
5043 if (alt_thresh > thresh)
5044 thresh = alt_thresh;
5047 /* Not above an RTO */
5048 if (thresh > tp->t_rxtcur) {
5049 thresh = tp->t_rxtcur;
5051 /* Not above a RTO max */
5052 if (thresh > rack_rto_max) {
5053 thresh = rack_rto_max;
5055 /* Apply user supplied min TLP */
5056 if (thresh < rack_tlp_min) {
5057 thresh = rack_tlp_min;
5063 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack)
5066 * We want the rack_rtt which is the
5067 * last rtt we measured. However if that
5068 * does not exist we fall back to the srtt (which
5069 * we probably will never do) and then as a last
5070 * resort we use RACK_INITIAL_RTO if no srtt is
5073 if (rack->rc_rack_rtt)
5074 return (rack->rc_rack_rtt);
5075 else if (tp->t_srtt == 0)
5076 return (RACK_INITIAL_RTO);
5077 return (tp->t_srtt);
5080 static struct rack_sendmap *
5081 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused)
5084 * Check to see whether we need to fall into recovery. We will
5085 * need to do so if our oldest transmit is past the time we should
5088 struct tcp_rack *rack;
5089 struct rack_sendmap *rsm;
5091 uint32_t srtt, thresh;
5093 rack = (struct tcp_rack *)tp->t_fb_ptr;
5094 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) {
5097 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5102 if (rsm->r_flags & RACK_ACKED) {
5103 rsm = rack_find_lowest_rsm(rack);
5107 idx = rsm->r_rtr_cnt - 1;
5108 srtt = rack_grab_rtt(tp, rack);
5109 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
5110 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) {
5113 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) {
5116 /* Ok if we reach here we are overdue and this guy can be sent */
5117 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
5122 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack)
5128 t = (tp->t_srtt + (tp->t_rttvar << 2));
5129 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
5130 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop);
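/*
 * Hypothetical example (values not from the code): with t_srtt of
 * 40000 usecs, t_rttvar of 5000 usecs and t_rxtshift of 2, the raw
 * value is (40000 + 20000) * tcp_backoff[2] = 240000 usecs before it
 * is clamped into [rack_persist_min, rack_persist_max] with the
 * configured timer slop.
 */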
5131 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT;
5132 ret_val = (uint32_t)tt;
5137 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack)
5140 * Start the FR timer, we do this based on getting the first one in
5141 * the rc_tmap. Note that if it's NULL we must stop the timer. In all
5142 * events we need to stop the running timer (if its running) before
5143 * starting the new one.
5145 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse;
5148 int32_t is_tlp_timer = 0;
5149 struct rack_sendmap *rsm;
5151 if (rack->t_timers_stopped) {
5152 /* All timers have been stopped, none are to run */
5155 if (rack->rc_in_persist) {
5156 /* We can't start any timer in persists */
5157 return (rack_get_persists_timer_val(tp, rack));
5159 rack->rc_on_min_to = 0;
5160 if ((tp->t_state < TCPS_ESTABLISHED) ||
5161 ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
5164 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5165 if ((rsm == NULL) || sup_rack) {
5166 /* Nothing on the send map or no rack */
5168 time_since_sent = 0;
5169 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5172 * Should we discount the RTX timer any?
5174 * We want to discount it the smallest amount.
5175 * If a timer (Rack/TLP or RXT) has gone off more
5176 * recently that's the discount we want to use (now - timer time).
5177 * If the retransmit of the oldest packet was more recent, then
5178 * we want to use that (now - oldest-packet-last_transmit_time).
5181 idx = rsm->r_rtr_cnt - 1;
5182 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx])))
5183 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
5185 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
5186 if (TSTMP_GT(cts, tstmp_touse))
5187 time_since_sent = cts - tstmp_touse;
5189 if (SEQ_LT(tp->snd_una, tp->snd_max) ||
5190 sbavail(&tptosocket(tp)->so_snd)) {
5191 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT;
5193 if (to > time_since_sent)
5194 to -= time_since_sent;
5196 to = rack->r_ctl.rc_min_to;
5199 /* Special case for KEEPINIT */
5200 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
5201 (TP_KEEPINIT(tp) != 0) &&
5204 * We have to put a ceiling on the rxt timer
5205 * of the keep-init timeout.
5207 uint32_t max_time, red;
5209 max_time = TICKS_2_USEC(TP_KEEPINIT(tp));
5210 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) {
5211 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]);
5217 /* Reduce timeout to the keep value if needed */
5225 if (rsm->r_flags & RACK_ACKED) {
5226 rsm = rack_find_lowest_rsm(rack);
5232 if (rack->sack_attack_disable) {
5234 * We don't want to do
5235 * any TLP's if you are an attacker.
5236 * Though if you are doing what
5237 * is expected you may still have
5238 * SACK-PASSED marks.
5242 /* Convert from ms to usecs */
5243 if ((rsm->r_flags & RACK_SACK_PASSED) ||
5244 (rsm->r_flags & RACK_RWND_COLLAPSED) ||
5245 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
5246 if ((tp->t_flags & TF_SENTFIN) &&
5247 ((tp->snd_max - tp->snd_una) == 1) &&
5248 (rsm->r_flags & RACK_HAS_FIN)) {
5250 * We don't start a rack timer if all we have is a
5255 if ((rack->use_rack_rr == 0) &&
5256 (IN_FASTRECOVERY(tp->t_flags)) &&
5257 (rack->rack_no_prr == 0) &&
5258 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) {
5260 * We are not cheating, in recovery and
5261 * not enough ack's to yet get our next
5262 * retransmission out.
5264 * Note that classified attackers do not
5265 * get to use the rack-cheat.
5269 srtt = rack_grab_rtt(tp, rack);
5270 thresh = rack_calc_thresh_rack(rack, srtt, cts);
5271 idx = rsm->r_rtr_cnt - 1;
5272 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh;
5273 if (SEQ_GEQ(exp, cts)) {
5275 if (to < rack->r_ctl.rc_min_to) {
5276 to = rack->r_ctl.rc_min_to;
5277 if (rack->r_rr_config == 3)
5278 rack->rc_on_min_to = 1;
5281 to = rack->r_ctl.rc_min_to;
5282 if (rack->r_rr_config == 3)
5283 rack->rc_on_min_to = 1;
5286 /* Ok we need to do a TLP not RACK */
5288 if ((rack->rc_tlp_in_progress != 0) &&
5289 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) {
5291 * The previous send was a TLP and we have sent
5292 * N TLP's without sending new data.
5296 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
5298 /* We found no rsm to TLP with. */
5301 if (rsm->r_flags & RACK_HAS_FIN) {
5302 /* If it's a FIN we don't do TLP */
5306 idx = rsm->r_rtr_cnt - 1;
5307 time_since_sent = 0;
5308 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time))
5309 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
5311 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
5312 if (TSTMP_GT(cts, tstmp_touse))
5313 time_since_sent = cts - tstmp_touse;
5316 if ((rack->rc_srtt_measure_made == 0) &&
5317 (tp->t_srtt == 1)) {
5319 * If another stack has run and set srtt to 1,
5320 * then the srtt was 0, so let's use the initial.
5322 srtt = RACK_INITIAL_RTO;
5324 srtt_cur = tp->t_srtt;
5328 srtt = RACK_INITIAL_RTO;
5330 * If the SRTT is not keeping up and the
5331 * rack RTT has spiked we want to use
5332 * the last RTT not the smoothed one.
5334 if (rack_tlp_use_greater &&
5336 (srtt < rack_grab_rtt(tp, rack))) {
5337 srtt = rack_grab_rtt(tp, rack);
5339 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt);
5340 if (thresh > time_since_sent) {
5341 to = thresh - time_since_sent;
5343 to = rack->r_ctl.rc_min_to;
5344 rack_log_alt_to_to_cancel(rack,
5346 time_since_sent, /* flex2 */
5347 tstmp_touse, /* flex3 */
5348 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */
5349 (uint32_t)rsm->r_tim_lastsent[idx],
5353 if (to < rack_tlp_min) {
5356 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) {
5358 * If the TLP time works out to be larger than the max
5359 * RTO, let's not do TLP; just RTO.
5364 if (is_tlp_timer == 0) {
5365 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK;
5367 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP;
5375 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5377 if (rack->rc_in_persist == 0) {
5378 if (tp->t_flags & TF_GPUTINPROG) {
5380 * Stop the goodput now, the calling of the
5381 * measurement function clears the flag.
5383 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__,
5384 RACK_QUALITY_PERSIST);
5386 #ifdef NETFLIX_SHARED_CWND
5387 if (rack->r_ctl.rc_scw) {
5388 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
5389 rack->rack_scwnd_is_idle = 1;
5392 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
5393 if (rack->r_ctl.rc_went_idle_time == 0)
5394 rack->r_ctl.rc_went_idle_time = 1;
5395 rack_timer_cancel(tp, rack, cts, __LINE__);
5396 rack->r_ctl.persist_lost_ends = 0;
5397 rack->probe_not_answered = 0;
5398 rack->forced_ack = 0;
5400 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
5401 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
5402 rack->rc_in_persist = 1;
5407 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5409 if (tcp_in_hpts(rack->rc_inp)) {
5410 tcp_hpts_remove(rack->rc_inp);
5411 rack->r_ctl.rc_hpts_flags = 0;
5413 #ifdef NETFLIX_SHARED_CWND
5414 if (rack->r_ctl.rc_scw) {
5415 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
5416 rack->rack_scwnd_is_idle = 0;
5419 if (rack->rc_gp_dyn_mul &&
5420 (rack->use_fixed_rate == 0) &&
5421 (rack->rc_always_pace)) {
5423 * Do we count this as if a probe-rtt just
5426 uint32_t time_idle, idle_min;
5428 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time;
5429 idle_min = rack_min_probertt_hold;
5430 if (rack_probertt_gpsrtt_cnt_div) {
5432 extra = (uint64_t)rack->r_ctl.rc_gp_srtt *
5433 (uint64_t)rack_probertt_gpsrtt_cnt_mul;
5434 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div;
5435 idle_min += (uint32_t)extra;
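/*
 * Hypothetical example: with a gp_srtt of 40000 usecs and mul/div
 * knobs of 2 and 1, idle_min grows by 80000 usecs beyond the base
 * hold time; an idle period at least that long is then treated below
 * as if a probe-rtt had just completed.
 */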
5437 if (time_idle >= idle_min) {
5438 /* Yes, we count it as a probe-rtt. */
5441 us_cts = tcp_get_usecs(NULL);
5442 if (rack->in_probe_rtt == 0) {
5443 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
5444 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
5445 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
5446 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
5448 rack_exit_probertt(rack, us_cts);
5452 rack->rc_in_persist = 0;
5453 rack->r_ctl.rc_went_idle_time = 0;
5455 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
5456 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
5457 rack->r_ctl.rc_agg_delayed = 0;
5460 rack->r_ctl.rc_agg_early = 0;
5464 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts,
5465 struct hpts_diag *diag, struct timeval *tv)
5467 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
5468 union tcp_log_stackspecific log;
5470 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5471 log.u_bbr.flex1 = diag->p_nxt_slot;
5472 log.u_bbr.flex2 = diag->p_cur_slot;
5473 log.u_bbr.flex3 = diag->slot_req;
5474 log.u_bbr.flex4 = diag->inp_hptsslot;
5475 log.u_bbr.flex5 = diag->slot_remaining;
5476 log.u_bbr.flex6 = diag->need_new_to;
5477 log.u_bbr.flex7 = diag->p_hpts_active;
5478 log.u_bbr.flex8 = diag->p_on_min_sleep;
5479 /* Hijack other fields as needed */
5480 log.u_bbr.epoch = diag->have_slept;
5481 log.u_bbr.lt_epoch = diag->yet_to_sleep;
5482 log.u_bbr.pkts_out = diag->co_ret;
5483 log.u_bbr.applimited = diag->hpts_sleep_time;
5484 log.u_bbr.delivered = diag->p_prev_slot;
5485 log.u_bbr.inflight = diag->p_runningslot;
5486 log.u_bbr.bw_inuse = diag->wheel_slot;
5487 log.u_bbr.rttProp = diag->wheel_cts;
5488 log.u_bbr.timeStamp = cts;
5489 log.u_bbr.delRate = diag->maxslots;
5490 log.u_bbr.cur_del_rate = diag->p_curtick;
5491 log.u_bbr.cur_del_rate <<= 32;
5492 log.u_bbr.cur_del_rate |= diag->p_lasttick;
5493 TCP_LOG_EVENTP(rack->rc_tp, NULL,
5494 &rack->rc_inp->inp_socket->so_rcv,
5495 &rack->rc_inp->inp_socket->so_snd,
5496 BBR_LOG_HPTSDIAG, 0,
5497 0, &log, false, tv);
5503 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type)
5505 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
5506 union tcp_log_stackspecific log;
5509 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5510 log.u_bbr.flex1 = sb->sb_flags;
5511 log.u_bbr.flex2 = len;
5512 log.u_bbr.flex3 = sb->sb_state;
5513 log.u_bbr.flex8 = type;
5514 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
5515 TCP_LOG_EVENTP(rack->rc_tp, NULL,
5516 &rack->rc_inp->inp_socket->so_rcv,
5517 &rack->rc_inp->inp_socket->so_snd,
5519 len, &log, false, &tv);
5524 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
5525 int32_t slot, uint32_t tot_len_this_send, int sup_rack)
5527 struct hpts_diag diag;
5528 struct inpcb *inp = tptoinpcb(tp);
5530 uint32_t delayed_ack = 0;
5531 uint32_t hpts_timeout;
5532 uint32_t entry_slot = slot;
5537 if ((tp->t_state == TCPS_CLOSED) ||
5538 (tp->t_state == TCPS_LISTEN)) {
5541 if (tcp_in_hpts(inp)) {
5542 /* Already on the pacer */
5545 stopped = rack->rc_tmr_stopped;
5546 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
5547 left = rack->r_ctl.rc_timer_exp - cts;
5549 rack->r_ctl.rc_timer_exp = 0;
5550 rack->r_ctl.rc_hpts_flags = 0;
5551 us_cts = tcp_get_usecs(&tv);
5552 /* Now early/late accounting */
5553 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0);
5554 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) {
5556 * We have an early carry over set,
5557 * we can always add more time so we
5558 * can always make this compensation.
5560 * Note if ack's are allowed to wake us do not
5561 * penalize the next timer for being awakened
5562 * by an ack aka the rc_agg_early (non-paced mode).
5564 slot += rack->r_ctl.rc_agg_early;
5566 rack->r_ctl.rc_agg_early = 0;
5570 * This is harder, we can
5571 * compensate some but it
5572 * really depends on what
5573 * the current pacing time is.
5575 if (rack->r_ctl.rc_agg_delayed >= slot) {
5577 * We can't compensate for it all.
5578 * And we have to have some time
5579 * on the clock. We always have a min
5580 * 10 slots (10 x 10 i.e. 100 usecs).
5582 if (slot <= HPTS_TICKS_PER_SLOT) {
5584 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot);
5585 slot = HPTS_TICKS_PER_SLOT;
5587 /* We take off some */
5588 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT);
5589 slot = HPTS_TICKS_PER_SLOT;
5592 slot -= rack->r_ctl.rc_agg_delayed;
5593 rack->r_ctl.rc_agg_delayed = 0;
5594 /* Make sure we have 100 microseconds at minimum */
5595 if (slot < HPTS_TICKS_PER_SLOT) {
5596 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot;
5597 slot = HPTS_TICKS_PER_SLOT;
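/*
 * In other words (illustrative restatement, no extra logic): whatever
 * portion of the accumulated delay cannot be taken out of this pacing
 * slot without dropping below the minimum slot size is carried
 * forward in rc_agg_delayed, so the debt is paid down across later
 * timers rather than all at once.
 */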
5599 if (rack->r_ctl.rc_agg_delayed == 0)
5604 /* We are pacing too */
5605 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
5607 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack);
5608 #ifdef NETFLIX_EXP_DETECTION
5609 if (rack->sack_attack_disable &&
5610 (slot < tcp_sad_pacing_interval)) {
5612 * We have a potential attacker on
5613 * the line. We have possibly some
5614 * (or no) pacing time set. We want to
5615 * slow down the processing of sacks by some
5616 * amount (if it is an attacker). Set the default
5617 * slot for attackers in place (unless the original
5618 * interval is longer). It's stored in
5619 * micro-seconds, so let's convert to msecs.
5621 slot = tcp_sad_pacing_interval;
5624 if (tp->t_flags & TF_DELACK) {
5625 delayed_ack = TICKS_2_USEC(tcp_delacktime);
5626 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK;
5628 if (delayed_ack && ((hpts_timeout == 0) ||
5629 (delayed_ack < hpts_timeout)))
5630 hpts_timeout = delayed_ack;
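/*
 * Illustrative example (hypothetical values): with the common 100 ms
 * delayed-ACK time this is 100000 usecs, so if no shorter timer was
 * computed above the delayed-ACK expiry becomes the hpts timeout.
 */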
5632 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
5634 * If no timers are going to run and we will fall off the hptsi
5635 * wheel, we resort to a keep-alive timer if it's configured.
5637 if ((hpts_timeout == 0) &&
5639 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
5640 (tp->t_state <= TCPS_CLOSING)) {
5642 * Ok we have no timer (persists, rack, tlp, rxt or
5643 * del-ack), we don't have segments being paced. So
5644 * all that is left is the keepalive timer.
5646 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
5647 /* Get the established keep-alive time */
5648 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp));
5651 * Get the initial setup keep-alive time,
5652 * note that this is probably not going to
5653 * happen, since rack will be running a rxt timer
5654 * if a SYN of some sort is outstanding. It is
5655 * actually handled in rack_timeout_rxt().
5657 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp));
5659 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP;
5660 if (rack->in_probe_rtt) {
5662 * We want to instead not wake up a long time from
5663 * now but to wake up about the time we would
5664 * exit probe-rtt and initiate a keep-alive ack.
5665 * This will get us out of probe-rtt and update
5668 hpts_timeout = rack_min_probertt_hold;
5672 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) ==
5673 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) {
5675 * RACK, TLP, persists and RXT timers all are restartable
5676 * based on actions input, i.e. we received a packet (ack
5677 * or sack) and that changes things (rw, or snd_una etc).
5678 * Thus we can restart them with a new value. For
5679 * keep-alive, delayed_ack we keep track of what was left
5680 * and restart the timer with a smaller value.
5682 if (left < hpts_timeout)
5683 hpts_timeout = left;
5687 * Hack alert for now we can't time-out over 2,147,483
5688 * seconds (a bit more than 596 hours), which is probably ok
5691 if (hpts_timeout > 0x7ffffffe)
5692 hpts_timeout = 0x7ffffffe;
5693 rack->r_ctl.rc_timer_exp = cts + hpts_timeout;
5695 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0);
5696 if ((rack->gp_ready == 0) &&
5697 (rack->use_fixed_rate == 0) &&
5698 (hpts_timeout < slot) &&
5699 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) {
5701 * We have no good estimate yet for the
5702 * old clunky burst mitigation or the
5703 * real pacing. And the tlp or rxt is smaller
5704 * than the pacing calculation. Let's not
5705 * pace that long since we know the calculation
5706 * so far is not accurate.
5708 slot = hpts_timeout;
5711 * Turn off all the flags for queuing by default. The
5712 * flags have important meanings to what happens when
5713 * LRO interacts with the transport. Most likely (by default now)
5714 * mbuf_queueing and ack compression are on. So the transport
5715 * has a couple of flags that control what happens (if those
5716 * are not on then these flags won't have any effect since it
5717 * won't go through the queuing LRO path).
5719 * INP_MBUF_QUEUE_READY - This flag says that I am busy
5720 * pacing output, so don't disturb. But
5721 * it also means LRO can wake me if there
5722 * is a SACK arrival.
5724 * INP_DONT_SACK_QUEUE - This flag is used in conjunction
5725 * with the above flag (QUEUE_READY) and
5726 * when present it says don't even wake me
5727 * if a SACK arrives.
5729 * The idea behind these flags is that if we are pacing we
5730 * set the MBUF_QUEUE_READY and only get woken up if
5731 * a SACK arrives (which could change things) or if
5732 * our pacing timer expires. If, however, we have a rack
5733 * timer running, then we don't even want a sack to wake
5734 * us since the rack timer has to expire before we can send.
5736 * Other cases should usually have none of the flags set
5737 * so LRO can call into us.
5739 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
5741 rack->r_ctl.rc_last_output_to = us_cts + slot;
5743 * A pacing timer (slot) is being set, in
5744 * such a case we cannot send (we are blocked by
5745 * the timer). So lets tell LRO that it should not
5746 * wake us unless there is a SACK. Note this only
5747 * will be effective if mbuf queueing is on or
5748 * compressed acks are being processed.
5750 inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
5752 * But wait if we have a Rack timer running
5753 * even a SACK should not disturb us (with
5754 * the exception of r_rr_config 3).
5756 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) &&
5757 (rack->r_rr_config != 3))
5758 inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
5759 if (rack->rc_ack_can_sendout_data) {
5761 * Ahh but wait, this is that special case
5762 * where the pacing timer can be disturbed
5763 * so back out the changes (used for non-paced
5766 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
5768 if ((rack->use_rack_rr) &&
5769 (rack->r_rr_config < 2) &&
5770 ((hpts_timeout) && (hpts_timeout < slot))) {
5772 * Arrange for the hpts to kick back in after the
5773 * t-o if the t-o does not cause a send.
5775 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout),
5777 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5778 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
5780 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(slot),
5782 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5783 rack_log_to_start(rack, cts, hpts_timeout, slot, 1);
5785 } else if (hpts_timeout) {
5787 * With respect to inp_flags2 here, lets let any new acks wake
5788 * us up here. Since we are not pacing (no pacing timer), output
5789 * can happen so we should let it. If it's a Rack timer, then any inbound
5790 * packet probably won't change the sending (we will be blocked)
5791 * but it may change the prr stats, so letting it in (with the defaults
5792 * set at the start of this block) is good enough.
5794 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout),
5796 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5797 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
5799 /* No timer starting */
5801 if (SEQ_GT(tp->snd_max, tp->snd_una)) {
5802 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?",
5803 tp, rack, tot_len_this_send, cts, slot, hpts_timeout);
5807 rack->rc_tmr_stopped = 0;
5809 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv);
5813 * RACK Timer, here we simply do logging and housekeeping.
5814 * The normal rack_output() function will call the
5815 * appropriate thing to check if we need to do a RACK retransmit.
5816 * We return 1, saying don't proceed with rack_output only
5817 * when all timers have been stopped (destroyed PCB?).
5820 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5823 * This timer simply provides an internal trigger to send out data.
5824 * The check_recovery_mode call will see if there are needed
5825 * retransmissions, if so we will enter fast-recovery. The output
5826 * call may or may not do the same thing depending on sysctl
5829 struct rack_sendmap *rsm;
5831 counter_u64_add(rack_to_tot, 1);
5832 if (rack->r_state && (rack->r_state != tp->t_state))
5833 rack_set_state(tp, rack);
5834 rack->rc_on_min_to = 0;
5835 rsm = rack_check_recovery_mode(tp, cts);
5836 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm);
5838 rack->r_ctl.rc_resend = rsm;
5839 rack->r_timer_override = 1;
5840 if (rack->use_rack_rr) {
5842 * Don't accumulate extra pacing delay
5843 * we are allowing the rack timer to
5844 * override pacing, i.e. rrr takes precedence
5845 * if the pacing interval is longer than the rrr
5846 * time (in other words we get the min of the
5847 * pacing time and the rrr pacing time).
5849 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
5852 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK;
5854 /* restart a timer and return 1 */
5855 rack_start_hpts_timer(rack, tp, cts,
5863 rack_adjust_orig_mlen(struct rack_sendmap *rsm)
5865 if (rsm->m->m_len > rsm->orig_m_len) {
5867 * Mbuf grew, caused by sbcompress, our offset does
5870 rsm->orig_m_len = rsm->m->m_len;
5871 } else if (rsm->m->m_len < rsm->orig_m_len) {
5873 * Mbuf shrank, trimmed off the top by an ack, our
5876 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len);
5877 rsm->orig_m_len = rsm->m->m_len;
5882 rack_setup_offset_for_rsm(struct rack_sendmap *src_rsm, struct rack_sendmap *rsm)
5887 if (src_rsm->m && (src_rsm->orig_m_len != src_rsm->m->m_len)) {
5888 /* Fix up the orig_m_len and possibly the mbuf offset */
5889 rack_adjust_orig_mlen(src_rsm);
5892 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start);
5893 while (soff >= m->m_len) {
5894 /* Move out past this mbuf */
5897 KASSERT((m != NULL),
5898 ("rsm:%p nrsm:%p hit at soff:%u null m",
5899 src_rsm, rsm, soff));
5903 rsm->orig_m_len = m->m_len;
5906 static __inline void
5907 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm,
5908 struct rack_sendmap *rsm, uint32_t start)
5912 nrsm->r_start = start;
5913 nrsm->r_end = rsm->r_end;
5914 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
5915 nrsm->r_flags = rsm->r_flags;
5916 nrsm->r_dupack = rsm->r_dupack;
5917 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed;
5918 nrsm->r_rtr_bytes = 0;
5919 nrsm->r_fas = rsm->r_fas;
5920 rsm->r_end = nrsm->r_start;
5921 nrsm->r_just_ret = rsm->r_just_ret;
5922 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
5923 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
5925 /* Now if we have a SYN flag we keep it on the left edge */
5926 if (nrsm->r_flags & RACK_HAS_SYN)
5927 nrsm->r_flags &= ~RACK_HAS_SYN;
5928 /* Now if we have a FIN flag we keep it on the right edge */
5929 if (rsm->r_flags & RACK_HAS_FIN)
5930 rsm->r_flags &= ~RACK_HAS_FIN;
5931 /* Push bit must go to the right edge as well */
5932 if (rsm->r_flags & RACK_HAD_PUSH)
5933 rsm->r_flags &= ~RACK_HAD_PUSH;
5934 /* Clone over the state of the hw_tls flag */
5935 nrsm->r_hw_tls = rsm->r_hw_tls;
5937 * Now we need to find nrsm's new location in the mbuf chain
5938 * we basically calculate a new offset, which is soff +
5939 * how much is left in the original rsm. Then we walk out the mbuf
5940 * chain to find the right position, it may be the same mbuf
5943 KASSERT(((rsm->m != NULL) ||
5944 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))),
5945 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack));
5947 rack_setup_offset_for_rsm(rsm, nrsm);
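/*
 * Hypothetical example: cloning an rsm covering [1000, 2448) at
 * start = 1724 leaves rsm as [1000, 1724) and nrsm as [1724, 2448);
 * a SYN stays with the left piece (rsm) while a FIN or PUSH stays
 * with the right piece (nrsm), matching the flag fix-ups above.
 */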
5950 static struct rack_sendmap *
5951 rack_merge_rsm(struct tcp_rack *rack,
5952 struct rack_sendmap *l_rsm,
5953 struct rack_sendmap *r_rsm)
5956 * We are merging two ack'd RSM's,
5957 * the l_rsm is on the left (lower seq
5958 * values) and the r_rsm is on the right
5959 * (higher seq value). The simplest way
5960 * to merge these is to move the right
5961 * one into the left. I don't think there
5962 * is any reason we need to try to find
5963 * the oldest (or last oldest retransmitted).
5966 struct rack_sendmap *rm;
5968 rack_log_map_chg(rack->rc_tp, rack, NULL,
5969 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__);
5970 l_rsm->r_end = r_rsm->r_end;
5971 if (l_rsm->r_dupack < r_rsm->r_dupack)
5972 l_rsm->r_dupack = r_rsm->r_dupack;
5973 if (r_rsm->r_rtr_bytes)
5974 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes;
5975 if (r_rsm->r_in_tmap) {
5976 /* This really should not happen */
5977 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext);
5978 r_rsm->r_in_tmap = 0;
5982 if (r_rsm->r_flags & RACK_HAS_FIN)
5983 l_rsm->r_flags |= RACK_HAS_FIN;
5984 if (r_rsm->r_flags & RACK_TLP)
5985 l_rsm->r_flags |= RACK_TLP;
5986 if (r_rsm->r_flags & RACK_RWND_COLLAPSED)
5987 l_rsm->r_flags |= RACK_RWND_COLLAPSED;
5988 if ((r_rsm->r_flags & RACK_APP_LIMITED) &&
5989 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) {
5991 * If both are app-limited then let the
5992 * free lower the count. If right is app
5993 * limited and left is not, transfer.
5995 l_rsm->r_flags |= RACK_APP_LIMITED;
5996 r_rsm->r_flags &= ~RACK_APP_LIMITED;
5997 if (r_rsm == rack->r_ctl.rc_first_appl)
5998 rack->r_ctl.rc_first_appl = l_rsm;
6001 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm);
6003 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm);
6005 panic("removing head in rack:%p rsm:%p rm:%p",
6009 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) {
6010 /* Transfer the split limit to the map we free */
6011 r_rsm->r_limit_type = l_rsm->r_limit_type;
6012 l_rsm->r_limit_type = 0;
6014 rack_free(rack, r_rsm);
6019 * TLP Timer, here we simply set up what segment we want to
6020 * have the TLP expire on, the normal rack_output() will then
6023 * We return 1, saying don't proceed with rack_output only
6024 * when all timers have been stopped (destroyed PCB?).
6027 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp)
6032 struct rack_sendmap *rsm = NULL;
6034 struct rack_sendmap *insret;
6036 struct socket *so = tptosocket(tp);
6038 uint32_t out, avail;
6039 int collapsed_win = 0;
6041 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
6042 /* It's not time yet */
6045 if (ctf_progress_timeout_check(tp, true)) {
6046 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
6047 return (-ETIMEDOUT); /* tcp_drop() */
6050 * A TLP timer has expired. We have been idle for 2 rtts. So we now
6051 * need to figure out how to force a full MSS segment out.
6053 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL);
6054 rack->r_ctl.retran_during_recovery = 0;
6055 rack->r_ctl.dsack_byte_cnt = 0;
6056 counter_u64_add(rack_tlp_tot, 1);
6057 if (rack->r_state && (rack->r_state != tp->t_state))
6058 rack_set_state(tp, rack);
6059 avail = sbavail(&so->so_snd);
6060 out = tp->snd_max - tp->snd_una;
6061 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) {
6062 /* special case, we need a retransmission */
6066 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) {
6067 rack->r_ctl.dsack_persist--;
6068 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
6069 rack->r_ctl.num_dsack = 0;
6071 rack_log_dsack_event(rack, 1, __LINE__, 0, 0);
6073 if ((tp->t_flags & TF_GPUTINPROG) &&
6074 (rack->r_ctl.rc_tlp_cnt_out == 1)) {
6076 * If this is the second in a row
6077 * TLP and we are doing a measurement
6078 * it's time to abandon the measurement.
6079 * Something is likely broken on
6080 * the client's network and measuring a
6081 * broken network does us no good.
6083 tp->t_flags &= ~TF_GPUTINPROG;
6084 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
6085 rack->r_ctl.rc_gp_srtt /*flex1*/,
6087 0, 0, 18, __LINE__, NULL, 0);
6090 * Check our send oldest always settings, and if
6091 * there is an oldest to send jump to the need_retran.
6093 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0))
6097 /* New data is available */
6099 if (amm > ctf_fixed_maxseg(tp)) {
6100 amm = ctf_fixed_maxseg(tp);
6101 if ((amm + out) > tp->snd_wnd) {
6102 /* We are rwnd limited */
6105 } else if (amm < ctf_fixed_maxseg(tp)) {
6106 /* not enough to fill a MTU */
6109 if (IN_FASTRECOVERY(tp->t_flags)) {
6111 if (rack->rack_no_prr == 0) {
6112 if (out + amm <= tp->snd_wnd) {
6113 rack->r_ctl.rc_prr_sndcnt = amm;
6114 rack->r_ctl.rc_tlp_new_data = amm;
6115 rack_log_to_prr(rack, 4, 0, __LINE__);
6120 /* Set the send-new override */
6121 if (out + amm <= tp->snd_wnd)
6122 rack->r_ctl.rc_tlp_new_data = amm;
6126 rack->r_ctl.rc_tlpsend = NULL;
6127 counter_u64_add(rack_tlp_newdata, 1);
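/*
 * Hypothetical example: with a 1448 byte fixed maxseg, 4000 bytes
 * already outstanding, more than a full segment of unsent data in the
 * socket buffer and a 16384 byte send window, amm is clamped to 1448
 * and, since 4000 + 1448 fits in the window, the TLP is armed to send
 * that one new segment rather than a retransmission.
 */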
6132 * Ok we need to arrange the last un-acked segment to be re-sent, or
6133 * optionally the first un-acked segment.
6135 if (collapsed_win == 0) {
6136 if (rack_always_send_oldest)
6137 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6139 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6140 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) {
6141 rsm = rack_find_high_nonack(rack, rsm);
6146 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
6152 * We must find the last segment
6153 * that was acceptable to the client.
6155 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
6156 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) {
6162 /* None? if so send the first */
6163 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6166 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
6172 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) {
6174 * We need to split this last segment in two.
6176 struct rack_sendmap *nrsm;
6178 nrsm = rack_alloc_full_limit(rack);
6181 * No memory to split, we will just exit and punt
6182 * off to the RXT timer.
6186 rack_clone_rsm(rack, nrsm, rsm,
6187 (rsm->r_end - ctf_fixed_maxseg(tp)));
6188 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
6190 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
6192 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
6193 if (insret != NULL) {
6194 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
6195 nrsm, insret, rack, rsm);
6198 if (rsm->r_in_tmap) {
6199 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
6200 nrsm->r_in_tmap = 1;
6204 rack->r_ctl.rc_tlpsend = rsm;
6206 /* Make sure output path knows we are doing a TLP */
6208 rack->r_timer_override = 1;
6209 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
6212 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
6217 * Delayed ack Timer, here we simply need to set up the
6218 * ACK_NOW flag and remove the DELACK flag. From there
6219 * the output routine will send the ack out.
6221 * We only return 1, saying don't proceed, if all timers
6222 * are stopped (destroyed PCB?).
6225 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6228 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL);
6229 tp->t_flags &= ~TF_DELACK;
6230 tp->t_flags |= TF_ACKNOW;
6231 KMOD_TCPSTAT_INC(tcps_delack);
6232 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
6237 * Persists timer, here we simply send the
6238 * same thing as a keepalive will:
6239 * the one byte send.
6241 * We only return 1, saying don't proceed, if all timers
6242 * are stopped (destroyed PCB?).
6245 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6247 struct tcptemp *t_template;
6250 if (rack->rc_in_persist == 0)
6252 if (ctf_progress_timeout_check(tp, false)) {
6253 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
6254 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
6255 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
6256 return (-ETIMEDOUT); /* tcp_drop() */
6259 * Persistence timer into zero window. Force a byte to be output, if
6262 KMOD_TCPSTAT_INC(tcps_persisttimeo);
6264 * Hack: if the peer is dead/unreachable, we do not time out if the
6265 * window is closed. After a full backoff, drop the connection if
6266 * the idle time (no responses to probes) reaches the maximum
6267 * backoff that we would use if retransmitting.
6269 if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
6270 (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
6271 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) {
6272 KMOD_TCPSTAT_INC(tcps_persistdrop);
6273 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
6274 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
6275 retval = -ETIMEDOUT; /* tcp_drop() */
6278 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) &&
6279 tp->snd_una == tp->snd_max)
6280 rack_exit_persist(tp, rack, cts);
6281 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT;
6283 * If the user has closed the socket then drop a persisting
6284 * connection after a much reduced timeout.
6286 if (tp->t_state > TCPS_CLOSE_WAIT &&
6287 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
6288 KMOD_TCPSTAT_INC(tcps_persistdrop);
6289 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
6290 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
6291 retval = -ETIMEDOUT; /* tcp_drop() */
6294 t_template = tcpip_maketemplate(rack->rc_inp);
6296 /* only set it if we were answered */
6297 if (rack->forced_ack == 0) {
6298 rack->forced_ack = 1;
6299 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
6301 rack->probe_not_answered = 1;
6302 counter_u64_add(rack_persists_loss, 1);
6303 rack->r_ctl.persist_lost_ends++;
6305 counter_u64_add(rack_persists_sends, 1);
6306 tcp_respond(tp, t_template->tt_ipgen,
6307 &t_template->tt_t, (struct mbuf *)NULL,
6308 tp->rcv_nxt, tp->snd_una - 1, 0);
6309 /* This sends an ack */
6310 if (tp->t_flags & TF_DELACK)
6311 tp->t_flags &= ~TF_DELACK;
6312 free(t_template, M_TEMP);
6314 if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
6317 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL);
6318 rack_start_hpts_timer(rack, tp, cts,
6324 * If a keepalive goes off, we had no other timers
6325 * happening. We always return 1 here since this
6326 * routine either drops the connection or sends
6327 * out a segment via tcp_respond().
6330 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6332 struct tcptemp *t_template;
6333 struct inpcb *inp = tptoinpcb(tp);
6335 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
6336 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL);
6338 * Keep-alive timer went off; send something or drop connection if
6339 * idle for too long.
6341 KMOD_TCPSTAT_INC(tcps_keeptimeo);
6342 if (tp->t_state < TCPS_ESTABLISHED)
6344 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
6345 tp->t_state <= TCPS_CLOSING) {
6346 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
6349 * Send a packet designed to force a response if the peer is
6350 * up and reachable: either an ACK if the connection is
6351 * still alive, or an RST if the peer has closed the
6352 * connection due to timeout or reboot. Using sequence
6353 * number tp->snd_una-1 causes the transmitted zero-length
6354 * segment to lie outside the receive window; by the
6355 * protocol spec, this requires the correspondent TCP to
6358 KMOD_TCPSTAT_INC(tcps_keepprobe);
6359 t_template = tcpip_maketemplate(inp);
6361 if (rack->forced_ack == 0) {
6362 rack->forced_ack = 1;
6363 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
6365 rack->probe_not_answered = 1;
6367 tcp_respond(tp, t_template->tt_ipgen,
6368 &t_template->tt_t, (struct mbuf *)NULL,
6369 tp->rcv_nxt, tp->snd_una - 1, 0);
6370 free(t_template, M_TEMP);
6373 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
6376 KMOD_TCPSTAT_INC(tcps_keepdrops);
6377 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
6378 return (-ETIMEDOUT); /* tcp_drop() */
6382 * Retransmit helper function, clear up all the ack
6383 * flags and take care of important bookkeeping.
6386 rack_remxt_tmr(struct tcpcb *tp)
6389 * The retransmit timer went off, all sack'd blocks must be
6392 struct rack_sendmap *rsm, *trsm = NULL;
6393 struct tcp_rack *rack;
6395 rack = (struct tcp_rack *)tp->t_fb_ptr;
6396 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__);
6397 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL);
6398 if (rack->r_state && (rack->r_state != tp->t_state))
6399 rack_set_state(tp, rack);
6401 * Ideally we would like to be able to
6402 * mark SACK-PASS on anything not acked here.
6404 * However, if we do that we would burst out
6405 * all that data 1ms apart. This would be unwise,
6406 * so for now we will just let the normal rxt timer
6407 * and tlp timer take care of it.
6409 * Also we really need to stick them back in sequence
6410 * order. This way we send in the proper order and any
6411 * sacks that come floating in will "re-ack" the data.
6412 * To do this we zap the tmap with an INIT and then
6413 * walk through and place every rsm in the RB tree
6414 * back in its seq ordered place.
6416 TAILQ_INIT(&rack->r_ctl.rc_tmap);
6417 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
6419 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
6420 /* We must re-add it back to the tlist */
6422 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
6424 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
6428 if (rsm->r_flags & RACK_ACKED)
6429 rsm->r_flags |= RACK_WAS_ACKED;
6430 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED);
6431 rsm->r_flags |= RACK_MUST_RXT;
6433 /* Clear the count (we just un-acked them) */
6434 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una;
6435 rack->r_ctl.rc_sacked = 0;
6436 rack->r_ctl.rc_sacklast = NULL;
6437 rack->r_ctl.rc_agg_delayed = 0;
6439 rack->r_ctl.rc_agg_early = 0;
6441 /* Clear the tlp rtx mark */
6442 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6443 if (rack->r_ctl.rc_resend != NULL)
6444 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT;
6445 rack->r_ctl.rc_prr_sndcnt = 0;
6446 rack_log_to_prr(rack, 6, 0, __LINE__);
6447 rack->r_timer_override = 1;
6448 if ((((tp->t_flags & TF_SACK_PERMIT) == 0)
6449 #ifdef NETFLIX_EXP_DETECTION
6450 || (rack->sack_attack_disable != 0)
6452 ) && ((tp->t_flags & TF_SENTFIN) == 0)) {
6454 * For non-sack customers new data
6455 * needs to go out as retransmits until
6456 * we retransmit up to snd_max.
6458 rack->r_must_retran = 1;
6459 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp,
6460 rack->r_ctl.rc_sacked);
6462 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
6466 rack_convert_rtts(struct tcpcb *tp)
6468 if (tp->t_srtt > 1) {
6471 val = tp->t_srtt >> TCP_RTT_SHIFT;
6472 frac = tp->t_srtt & 0x1f;
6473 tp->t_srtt = TICKS_2_USEC(val);
6475 * frac is the fractional part of the srtt (if any)
6476 * but it's in ticks and every bit represents
6481 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE);
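/*
 * Hypothetical example: with hz = 1000 a frac of 20 (20/32 of a 1 ms
 * tick) scales to 20 * 1000 / 32 = 625 usecs added onto the converted
 * srtt.
 */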
6483 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE));
6491 val = tp->t_rttvar >> TCP_RTTVAR_SHIFT;
6492 frac = tp->t_rttvar & 0x1f;
6493 tp->t_rttvar = TICKS_2_USEC(val);
6495 * frac is the fractional part of the rttvar (if any)
6496 * but it's in ticks and every bit represents
6501 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE);
6503 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE));
6505 tp->t_rttvar += frac;
6508 tp->t_rxtcur = RACK_REXMTVAL(tp);
6509 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
6510 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop);
6512 if (tp->t_rxtcur > rack_rto_max) {
6513 tp->t_rxtcur = rack_rto_max;
6518 rack_cc_conn_init(struct tcpcb *tp)
6520 struct tcp_rack *rack;
6523 rack = (struct tcp_rack *)tp->t_fb_ptr;
6527 * Now convert to rack's internal format,
6530 if ((srtt == 0) && (tp->t_srtt != 0))
6531 rack_convert_rtts(tp);
6533 * We want a chance to stay in slowstart as
6534 * we create a connection. TCP spec says that
6535 * initially ssthresh is infinite. For our
6536 * purposes that is the snd_wnd.
6538 if (tp->snd_ssthresh < tp->snd_wnd) {
6539 tp->snd_ssthresh = tp->snd_wnd;
6542 * We also want to assure an IW worth of
6543 * data can get inflight.
6545 if (rc_init_window(rack) < tp->snd_cwnd)
6546 tp->snd_cwnd = rc_init_window(rack);
6550 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
6551 * we will set up to retransmit the lowest seq number outstanding.
6554 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6556 struct inpcb *inp = tptoinpcb(tp);
6561 if ((tp->t_flags & TF_GPUTINPROG) &&
6564 * We have had a second timeout;
6565 * measurements on successive rxt's are not profitable.
6566 * It is unlikely to be of any use (the network is
6567 * broken or the client went away).
6569 tp->t_flags &= ~TF_GPUTINPROG;
6570 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
6571 rack->r_ctl.rc_gp_srtt /*flex1*/,
6573 0, 0, 18, __LINE__, NULL, 0);
6575 if (ctf_progress_timeout_check(tp, false)) {
6576 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
6577 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
6578 return (-ETIMEDOUT); /* tcp_drop() */
6580 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT;
6581 rack->r_ctl.retran_during_recovery = 0;
6582 rack->rc_ack_required = 1;
6583 rack->r_ctl.dsack_byte_cnt = 0;
6584 if (IN_FASTRECOVERY(tp->t_flags))
6585 tp->t_flags |= TF_WASFRECOVERY;
6587 tp->t_flags &= ~TF_WASFRECOVERY;
6588 if (IN_CONGRECOVERY(tp->t_flags))
6589 tp->t_flags |= TF_WASCRECOVERY;
6591 tp->t_flags &= ~TF_WASCRECOVERY;
6592 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
6593 (tp->snd_una == tp->snd_max)) {
6594 /* Nothing outstanding .. nothing to do */
6597 if (rack->r_ctl.dsack_persist) {
6598 rack->r_ctl.dsack_persist--;
6599 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
6600 rack->r_ctl.num_dsack = 0;
6602 rack_log_dsack_event(rack, 1, __LINE__, 0, 0);
6605 * Rack can only run one timer at a time, so we cannot
6606 * run a KEEPINIT (gating SYN sending) and a retransmit
6607 * timer for the SYN. So if we are in a front state and
6608 * have a KEEPINIT timer we need to check the first transmit
6609 * against now to see if we have exceeded the KEEPINIT time
6612 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
6613 (TP_KEEPINIT(tp) != 0)) {
6614 struct rack_sendmap *rsm;
6616 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6618 /* Ok we have something outstanding to test keepinit with */
6619 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) &&
6620 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) {
6621 /* We have exceeded the KEEPINIT time */
6622 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
6628 * Retransmission timer went off. Message has not been acked within
6629 * retransmit interval. Back off to a longer retransmit interval
6630 * and retransmit one segment.
6633 if ((rack->r_ctl.rc_resend == NULL) ||
6634 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) {
6636 * If the rwnd collapsed on
6637 * the one we are retransmitting
6638 * it does not count against the
6643 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) {
6644 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
6646 tp->t_rxtshift = TCP_MAXRXTSHIFT;
6647 KMOD_TCPSTAT_INC(tcps_timeoutdrop);
6648 /* XXXGL: previously t_softerror was casted to uint16_t */
6649 MPASS(tp->t_softerror >= 0);
6650 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT;
6651 goto out; /* tcp_drop() */
6653 if (tp->t_state == TCPS_SYN_SENT) {
6655 * If the SYN was retransmitted, indicate CWND to be limited
6656 * to 1 segment in cc_conn_init().
6659 } else if (tp->t_rxtshift == 1) {
6661 * first retransmit; record ssthresh and cwnd so they can be
6662 * recovered if this turns out to be a "bad" retransmit. A
6663 * retransmit is considered "bad" if an ACK for this segment
6664 * is received within RTT/2 interval; the assumption here is
6665 * that the ACK was already in flight. See "On Estimating
6666 * End-to-End Network Path Properties" by Allman and Paxson
6669 tp->snd_cwnd_prev = tp->snd_cwnd;
6670 tp->snd_ssthresh_prev = tp->snd_ssthresh;
6671 tp->snd_recover_prev = tp->snd_recover;
6672 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2);
6673 tp->t_flags |= TF_PREVVALID;
6674 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0)
6675 tp->t_flags &= ~TF_PREVVALID;
6676 KMOD_TCPSTAT_INC(tcps_rexmttimeo);
6677 if ((tp->t_state == TCPS_SYN_SENT) ||
6678 (tp->t_state == TCPS_SYN_RECEIVED))
6679 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift];
6681 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift];
6683 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt,
6684 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop);
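/*
 * Illustrative numbers (hypothetical): on the third timeout, with
 * t_rxtshift now 3, t_srtt of 40000 usecs and t_rttvar of 5000 usecs,
 * the base value is max(rack_rto_min, 40000 + (5000 << 2)) and is
 * multiplied by tcp_backoff[3] = 8, then bounded below by rack_rto_min
 * and above by rack_rto_max with the configured timer slop applied.
 */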
6686 * We enter the path for PLMTUD if connection is established or, if
6687 * connection is in FIN_WAIT_1 state, reason for the last is that if
6688 * amount of data we send is very small, we could send it in a couple
6689 * of packets and process straight to FIN. In that case we won't
6690 * catch ESTABLISHED state.
6693 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false;
6697 if (((V_tcp_pmtud_blackhole_detect == 1) ||
6698 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) ||
6699 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) &&
6700 ((tp->t_state == TCPS_ESTABLISHED) ||
6701 (tp->t_state == TCPS_FIN_WAIT_1))) {
6703 * Idea here is that each stage of mtu probe (usually,
6704 * 1448 -> 1188 -> 524) should be given 2 chances to recover
6705 * before further clamping down. 'tp->t_rxtshift % 2 == 0'
6706 * should take care of that.
6708 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) ==
6709 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) &&
6710 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
6711 tp->t_rxtshift % 2 == 0)) {
6713 * Enter Path MTU Black-hole Detection mechanism: -
6714 * Disable Path MTU Discovery (IP "DF" bit). -
6715 * Reduce MTU to lower value than what we negotiated
6718 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
6719 /* Record that we may have found a black hole. */
6720 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
6721 /* Keep track of previous MSS. */
6722 tp->t_pmtud_saved_maxseg = tp->t_maxseg;
6726 * Reduce the MSS to blackhole value or to the
6727 * default in an attempt to retransmit.
6731 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
6732 /* Use the sysctl tuneable blackhole MSS. */
6733 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
6734 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
6735 } else if (isipv6) {
6736 /* Use the default MSS. */
6737 tp->t_maxseg = V_tcp_v6mssdflt;
6739 * Disable Path MTU Discovery when we switch
6742 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
6743 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
6746 #if defined(INET6) && defined(INET)
6750 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
6751 /* Use the sysctl tuneable blackhole MSS. */
6752 tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
6753 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
6755 /* Use the default MSS. */
6756 tp->t_maxseg = V_tcp_mssdflt;
6758 * Disable Path MTU Discovery when we switch
6761 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
6762 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
6767 * If further retransmissions are still unsuccessful
6768 * with a lowered MTU, maybe this isn't a blackhole
6769 * and we restore the previous MSS and blackhole
6770 * detection flags. The limit '6' is determined by
6771 * giving each probe stage (1448, 1188, 524) 2
6772 * chances to recover.
6774 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
6775 (tp->t_rxtshift >= 6)) {
6776 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
6777 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
6778 tp->t_maxseg = tp->t_pmtud_saved_maxseg;
6779 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed);
6784 * Disable RFC1323 and SACK if we haven't got any response to
6785 * our third SYN to work-around some broken terminal servers
6786 * (most of which have hopefully been retired) that have bad VJ
6787 * header compression code which trashes TCP segments containing
6788 * unknown-to-them TCP options.
6790 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
6791 (tp->t_rxtshift == 3))
6792 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
6794 * If we backed off this far, our srtt estimate is probably bogus.
6795 * Clobber it so we'll take the next rtt measurement as our srtt;
6796 * move the current srtt into rttvar to keep the current retransmit
6799 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
6801 if ((inp->inp_vflag & INP_IPV6) != 0)
6806 tp->t_rttvar += tp->t_srtt;
6809 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
6810 tp->snd_recover = tp->snd_max;
6811 tp->t_flags |= TF_ACKNOW;
6813 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__);
6819 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp)
6822 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK);
6824 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
6825 (tp->t_flags & TF_GPUTINPROG)) {
6827 * We have a goodput in progress
6828 * and we have entered a late state.
6829 * Do we have enough data in the sb
6830 * to handle the GPUT request?
6834 bytes = tp->gput_ack - tp->gput_seq;
6835 if (SEQ_GT(tp->gput_seq, tp->snd_una))
6836 bytes += tp->gput_seq - tp->snd_una;
6837 if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
6839 * There are not enough bytes in the socket
6840 * buffer that have been sent to cover this
6841 * measurement. Cancel it.
6843 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
6844 rack->r_ctl.rc_gp_srtt /*flex1*/,
6846 0, 0, 18, __LINE__, NULL, 0);
6847 tp->t_flags &= ~TF_GPUTINPROG;
6853 if (tp->t_state == TCPS_LISTEN) {
6854 /* no timers on listen sockets */
6855 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)
6859 if ((timers & PACE_TMR_RACK) &&
6860 rack->rc_on_min_to) {
6862 * For the rack timer when we
6863 * are on a min-timeout (which means rrr_conf = 3)
6864 * we don't want to check the timer. It may
6865 * be going off for a pace and that's ok we
6866 * want to send the retransmit (if it's ready).
6868 * If it's on a normal rack timer (non-min) then
6869 * we will check if it's expired.
6871 goto skip_time_check;
6873 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
6876 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
6878 rack_log_to_processing(rack, cts, ret, 0);
6881 if (hpts_calling == 0) {
6883 * A user send or queued mbuf (sack) has called us? We
6884 * return 0 and let the pacing guards
6885 * deal with it if they should or
6886 * should not cause a send.
6889 rack_log_to_processing(rack, cts, ret, 0);
6893 * Ok our timer went off early and we are not paced; false
6894 * alarm, go back to sleep.
6897 left = rack->r_ctl.rc_timer_exp - cts;
6898 tcp_hpts_insert(tptoinpcb(tp), HPTS_MS_TO_SLOTS(left));
6899 rack_log_to_processing(rack, cts, ret, left);
6903 rack->rc_tmr_stopped = 0;
6904 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK;
6905 if (timers & PACE_TMR_DELACK) {
6906 ret = rack_timeout_delack(tp, rack, cts);
6907 } else if (timers & PACE_TMR_RACK) {
6908 rack->r_ctl.rc_tlp_rxt_last_time = cts;
6909 rack->r_fast_output = 0;
6910 ret = rack_timeout_rack(tp, rack, cts);
6911 } else if (timers & PACE_TMR_TLP) {
6912 rack->r_ctl.rc_tlp_rxt_last_time = cts;
6913 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp);
6914 } else if (timers & PACE_TMR_RXT) {
6915 rack->r_ctl.rc_tlp_rxt_last_time = cts;
6916 rack->r_fast_output = 0;
6917 ret = rack_timeout_rxt(tp, rack, cts);
6918 } else if (timers & PACE_TMR_PERSIT) {
6919 ret = rack_timeout_persist(tp, rack, cts);
6920 } else if (timers & PACE_TMR_KEEP) {
6921 ret = rack_timeout_keepalive(tp, rack, cts);
6923 rack_log_to_processing(rack, cts, ret, timers);
6928 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line)
6931 uint32_t us_cts, flags_on_entry;
6932 uint8_t hpts_removed = 0;
6934 flags_on_entry = rack->r_ctl.rc_hpts_flags;
6935 us_cts = tcp_get_usecs(&tv);
6936 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
6937 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) ||
6938 ((tp->snd_max - tp->snd_una) == 0))) {
6939 tcp_hpts_remove(rack->rc_inp);
6941 /* If we were not delayed cancel out the flag. */
6942 if ((tp->snd_max - tp->snd_una) == 0)
6943 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
6944 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
6946 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
6947 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
6948 if (tcp_in_hpts(rack->rc_inp) &&
6949 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) {
6951 * Canceling timers when we have no output being
6952 * paced. We also must remove ourselves from the
6955 tcp_hpts_remove(rack->rc_inp);
6958 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK);
6960 if (hpts_removed == 0)
6961 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
6965 rack_stopall(struct tcpcb *tp)
6967 struct tcp_rack *rack;
6968 rack = (struct tcp_rack *)tp->t_fb_ptr;
6969 rack->t_timers_stopped = 1;
6974 rack_stop_all_timers(struct tcpcb *tp)
6976 struct tcp_rack *rack;
6979 * Assure no timers are running.
6981 if (tcp_timer_active(tp, TT_PERSIST)) {
6982 /* We enter in persists, set the flag appropriately */
6983 rack = (struct tcp_rack *)tp->t_fb_ptr;
6984 rack->rc_in_persist = 1;
6989 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
6990 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag)
6995 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
6997 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
6998 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
6999 rsm->r_flags |= RACK_OVERMAX;
7001 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) {
7002 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
7003 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
7005 idx = rsm->r_rtr_cnt - 1;
7006 rsm->r_tim_lastsent[idx] = ts;
7008 * Here we don't add in the len of send, since it's already
7009 * in snduna <->snd_max.
7011 rsm->r_fas = ctf_flight_size(rack->rc_tp,
7012 rack->r_ctl.rc_sacked);
7013 if (rsm->r_flags & RACK_ACKED) {
7014 /* Probably MTU discovery messing with us */
7015 rsm->r_flags &= ~RACK_ACKED;
7016 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
7018 if (rsm->r_in_tmap) {
7019 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7022 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7024 /* Take off the must retransmit flag, if it's on */
7025 if (rsm->r_flags & RACK_MUST_RXT) {
7026 if (rack->r_must_retran)
7027 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
7028 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
7030 * We have retransmitted all we need. Clear
7031 * any must retransmit flags.
7033 rack->r_must_retran = 0;
7034 rack->r_ctl.rc_out_at_rto = 0;
7036 rsm->r_flags &= ~RACK_MUST_RXT;
7038 if (rsm->r_flags & RACK_SACK_PASSED) {
7039 /* We have retransmitted due to the SACK pass */
7040 rsm->r_flags &= ~RACK_SACK_PASSED;
7041 rsm->r_flags |= RACK_WAS_SACKPASS;
7046 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
7047 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag)
7050 * We (re-)transmitted starting at rsm->r_start for some length
7051 * (possibly less than r_end).
7053 struct rack_sendmap *nrsm;
7055 struct rack_sendmap *insret;
7061 c_end = rsm->r_start + len;
7062 if (SEQ_GEQ(c_end, rsm->r_end)) {
7064 * We retransmitted the whole piece or more than the whole
7065 * slopping into the next rsm.
7067 rack_update_rsm(tp, rack, rsm, ts, add_flag);
7068 if (c_end == rsm->r_end) {
7074 /* Hangs over the end, return what's left */
7075 act_len = rsm->r_end - rsm->r_start;
7076 *lenp = (len - act_len);
7077 return (rsm->r_end);
7079 /* We don't get out of this block. */
7082 * Here we retransmitted less than the whole thing which means we
7083 * have to split this into what was transmitted and what was not.
7085 nrsm = rack_alloc_full_limit(rack);
7088 * We can't get memory, so let's not proceed.
7094 * So here we are going to take the original rsm and make it what we
7095 * retransmitted. nrsm will be the tail portion we did not
7096 * retransmit. For example say the chunk was 1, 11 (10 bytes). And
7097 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to
7098 * 1, 6 and the new piece will be 6, 11.
7100 rack_clone_rsm(rack, nrsm, rsm, c_end);
7102 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
7104 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7106 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7107 if (insret != NULL) {
7108 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7109 nrsm, insret, rack, rsm);
7112 if (rsm->r_in_tmap) {
7113 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
7114 nrsm->r_in_tmap = 1;
7116 rsm->r_flags &= (~RACK_HAS_FIN);
7117 rack_update_rsm(tp, rack, rsm, ts, add_flag);
7118 /* Log a split of rsm into rsm and nrsm */
7119 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
7125 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
7126 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts,
7127 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, uint32_t s_moff, int hw_tls)
7129 struct tcp_rack *rack;
7130 struct rack_sendmap *rsm, *nrsm, fe;
7132 struct rack_sendmap *insret;
7134 register uint32_t snd_max, snd_una;
7137 * Add to the RACK log of packets in flight or retransmitted. If
7138 * there is a TS option we will use the TS echoed, if not we will
7141 * Retransmissions will increment the count and move the ts to its
7142 * proper place. Note that if options do not include TS's then we
7143 * won't be able to effectively use the ACK for an RTT on a retran.
7145 * Notes about r_start and r_end. Lets consider a send starting at
7146 * sequence 1 for 10 bytes. In such an example the r_start would be
7147 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
7148 * This means that r_end is actually the first sequence for the next
7153 * If err is set what do we do XXXrrs? should we not add the thing?
7154 * -- i.e. return if err != 0 or should we pretend we sent it? --
7155 * i.e. proceed with add ** do this for now.
7157 INP_WLOCK_ASSERT(tptoinpcb(tp));
7160 * We don't log errors -- we could but snd_max does not
7161 * advance in this case either.
7165 if (th_flags & TH_RST) {
7167 * We don't log resets and we return immediately from
7172 rack = (struct tcp_rack *)tp->t_fb_ptr;
7173 snd_una = tp->snd_una;
7174 snd_max = tp->snd_max;
7175 if (th_flags & (TH_SYN | TH_FIN)) {
7177 * The call to rack_log_output is made before bumping
7178 * snd_max. This means we can record one extra byte on a SYN
7179 * or FIN if seq_out is adding more on and a FIN is present
7180 * (and we are not resending).
7182 if ((th_flags & TH_SYN) && (seq_out == tp->iss))
7184 if (th_flags & TH_FIN)
7186 if (SEQ_LT(snd_max, tp->snd_nxt)) {
7188 * The add/update has not been done for the FIN/SYN
7191 snd_max = tp->snd_nxt;
7194 if (SEQ_LEQ((seq_out + len), snd_una)) {
7195 /* Are we sending an old segment to induce an ack (keep-alive)? */
7198 if (SEQ_LT(seq_out, snd_una)) {
7199 /* huh? should we panic? */
7202 end = seq_out + len;
7204 if (SEQ_GEQ(end, seq_out))
7205 len = end - seq_out;
7210 /* We don't log zero window probes */
7213 if (IN_FASTRECOVERY(tp->t_flags)) {
7214 rack->r_ctl.rc_prr_out += len;
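/*
 * Track how many bytes have gone out while in fast recovery;
 * rc_prr_out feeds the proportional rate reduction (PRR)
 * accounting used elsewhere to decide how much may be sent
 * per ack during recovery.
 */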
7216 /* First question is it a retransmission or new? */
7217 if (seq_out == snd_max) {
7220 rsm = rack_alloc(rack);
7223 * Hmm out of memory and the tcb got destroyed while
7228 if (th_flags & TH_FIN) {
7229 rsm->r_flags = RACK_HAS_FIN|add_flag;
7231 rsm->r_flags = add_flag;
7235 rsm->r_tim_lastsent[0] = cts;
7237 rsm->r_rtr_bytes = 0;
7238 if (th_flags & TH_SYN) {
7239 /* The data space is one beyond snd_una */
7240 rsm->r_flags |= RACK_HAS_SYN;
7242 rsm->r_start = seq_out;
7243 rsm->r_end = rsm->r_start + len;
7246 * save off the mbuf location that
7247 * sndmbuf_noadv returned (which is
7248 * where we started copying from)..
7253 * Here we do add in the len of send, since it's not yet
7254 * reflected in snd_una <-> snd_max.
7256 rsm->r_fas = (ctf_flight_size(rack->rc_tp,
7257 rack->r_ctl.rc_sacked) +
7258 (rsm->r_end - rsm->r_start));
7259 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */
7261 if (rsm->m->m_len <= rsm->soff) {
7263 * XXXrrs Question, will this happen?
7265 * If sbsndptr is set at the correct place
7266 * then s_moff should always be somewhere
7267 * within rsm->m. But if the sbsndptr was
7268 * off then that won't be true. If it occurs
7269 * we need to walk out to the correct location.
7274 while (lm->m_len <= rsm->soff) {
7275 rsm->soff -= lm->m_len;
7277 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u",
7278 __func__, rack, s_moff, s_mb, rsm->soff));
7282 rsm->orig_m_len = rsm->m->m_len;
7284 rsm->orig_m_len = 0;
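/*
 * orig_m_len records how long rsm->m was at transmit time; the
 * ack-processing path later compares it against the current m_len
 * (rack_adjust_orig_mlen) to notice that the underlying mbuf
 * changed and to re-sync the stored soff.
 */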
7285 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
7287 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__);
7289 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7291 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7292 if (insret != NULL) {
7293 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7294 nrsm, insret, rack, rsm);
7297 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7300 * Special case detection, is there just a single
7301 * packet outstanding when we are not in recovery?
7303 * If this is true mark it so.
7305 if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
7306 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) {
7307 struct rack_sendmap *prsm;
7309 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7311 prsm->r_one_out_nr = 1;
7316 * If we reach here it's a retransmission and we need to find it.
7318 memset(&fe, 0, sizeof(fe));
7320 if (hintrsm && (hintrsm->r_start == seq_out)) {
7324 /* No hints sorry */
7327 if ((rsm) && (rsm->r_start == seq_out)) {
7328 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag);
7335 /* Ok it was not the last pointer, go through it the hard way. */
7337 fe.r_start = seq_out;
7338 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
7340 if (rsm->r_start == seq_out) {
7341 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag);
7348 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) {
7349 /* Transmitted within this piece */
7351 * Ok we must split off the front and then let the
7352 * update do the rest
7354 nrsm = rack_alloc_full_limit(rack);
7356 rack_update_rsm(tp, rack, rsm, cts, add_flag);
7360 * copy rsm to nrsm and then trim the front of rsm
7361 * to not include this part.
7363 rack_clone_rsm(rack, nrsm, rsm, seq_out);
7364 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
7366 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7368 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7369 if (insret != NULL) {
7370 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7371 nrsm, insret, rack, rsm);
7374 if (rsm->r_in_tmap) {
7375 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
7376 nrsm->r_in_tmap = 1;
7378 rsm->r_flags &= (~RACK_HAS_FIN);
7379 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag);
7387 * Hmm not found in map did they retransmit both old and on into the
7390 if (seq_out == tp->snd_max) {
7392 } else if (SEQ_LT(seq_out, tp->snd_max)) {
7394 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n",
7395 seq_out, len, tp->snd_una, tp->snd_max);
7396 printf("Starting Dump of all rack entries\n");
7397 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
7398 printf("rsm:%p start:%u end:%u\n",
7399 rsm, rsm->r_start, rsm->r_end);
7401 printf("Dump complete\n");
7402 panic("seq_out not found rack:%p tp:%p",
7408 * Hmm beyond sndmax? (only if we are using the new rtt-pack
7411 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p",
7412 seq_out, len, tp->snd_max, tp);
7418 * Record one of the RTT updates from an ack into
7419 * our sample structure.
7423 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt,
7424 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt)
7426 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7427 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
7428 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
7430 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7431 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
7432 rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
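/*
 * rs_rtt_lowest/rs_rtt_highest track the extreme samples seen in
 * this single ack; which of low/high/average actually feeds srtt
 * is chosen later by rc_rate_sample_method in
 * tcp_rack_xmit_timer_commit().
 */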
7434 if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
7435 if (us_rtt < rack->r_ctl.rc_gp_lowrtt)
7436 rack->r_ctl.rc_gp_lowrtt = us_rtt;
7437 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd)
7438 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
7440 if ((confidence == 1) &&
7442 (rsm->r_just_ret) ||
7443 (rsm->r_one_out_nr &&
7444 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) {
7446 * If the rsm had a just return
7447 * hit it then we can't trust the
7448 * rtt measurement for buffer determination.
7449 * Note that a confidence of 2 indicates
7450 * SACK'd, which overrides the r_just_ret or
7451 * the r_one_out_nr. If it was a CUM-ACK and
7452 * we had only two outstanding, but get an
7453 * ack for only 1, then that also lowers our
7458 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7459 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) {
7460 if (rack->r_ctl.rack_rs.confidence == 0) {
7462 * We take anything with no current confidence
7465 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
7466 rack->r_ctl.rack_rs.confidence = confidence;
7467 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
7468 } else if (confidence || rack->r_ctl.rack_rs.confidence) {
7470 * Once we have a confident number,
7471 * we can update it with a smaller
7472 * value since this confident number
7473 * may include the DSACK time until
7474 * the next segment (the second one) arrived.
7476 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
7477 rack->r_ctl.rack_rs.confidence = confidence;
7478 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
7481 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
7482 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
7483 rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
7484 rack->r_ctl.rack_rs.rs_rtt_cnt++;
7488 * Collect new round-trip time estimate
7489 * and update averages and current timeout.
7492 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp)
7497 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY)
7498 /* No valid sample */
7500 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) {
7501 /* We are to use the lowest RTT seen in a single ack */
7502 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
7503 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) {
7504 /* We are to use the highest RTT seen in a single ack */
7505 rtt = rack->r_ctl.rack_rs.rs_rtt_highest;
7506 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) {
7507 /* We are to use the average RTT seen in a single ack */
7508 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot /
7509 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt);
7512 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method);
7518 if (rack->rc_gp_rtt_set == 0) {
7520 * With no RTT we have to accept
7521 * even one we are not confident of.
7523 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt;
7524 rack->rc_gp_rtt_set = 1;
7525 } else if (rack->r_ctl.rack_rs.confidence) {
7526 /* update the running gp srtt */
7527 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8);
7528 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8;
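/*
 * i.e. rc_gp_srtt is an EWMA: roughly 7/8 of the old value plus
 * 1/8 of the new confident sample (integer division, so it
 * truncates slightly).
 */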
7530 if (rack->r_ctl.rack_rs.confidence) {
7532 * record the low and high for highly buffered path computation,
7533 * we only do this if we are confident (not a retransmission).
7535 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) {
7536 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
7538 if (rack->rc_highly_buffered == 0) {
7540 * Currently once we declare a path as
7541 * highly buffered there is no going
7542 * back, which may be a problem...
7544 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) {
7545 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt,
7546 rack->r_ctl.rc_highest_us_rtt,
7547 rack->r_ctl.rc_lowest_us_rtt,
7549 rack->rc_highly_buffered = 1;
7553 if ((rack->r_ctl.rack_rs.confidence) ||
7554 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) {
7556 * If we are highly confident of it <or> it was
7557 * never retransmitted we accept it as the last us_rtt.
7559 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
7560 /* The lowest rtt can be set if it was not retransmitted */
7561 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) {
7562 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
7563 if (rack->r_ctl.rc_lowest_us_rtt == 0)
7564 rack->r_ctl.rc_lowest_us_rtt = 1;
7567 rack = (struct tcp_rack *)tp->t_fb_ptr;
7568 if (tp->t_srtt != 0) {
7570 * We keep a simple srtt in microseconds, like our rtt
7571 * measurement. We don't need to do any tricks with shifting
7572 * etc. Instead we just add in 1/8th of the new measurement
7573 * and subtract out 1/8 of the old srtt. We do the same with
7574 * the variance after finding the absolute value of the
7575 * difference between this sample and the current srtt.
7577 delta = tp->t_srtt - rtt;
7578 /* Take off 1/8th of the current sRTT */
7579 tp->t_srtt -= (tp->t_srtt >> 3);
7580 /* Add in 1/8th of the new RTT just measured */
7581 tp->t_srtt += (rtt >> 3);
7582 if (tp->t_srtt <= 0)
7584 /* Now let's take the absolute value for the variance */
7587 /* Subtract out 1/8th */
7588 tp->t_rttvar -= (tp->t_rttvar >> 3);
7589 /* Add in 1/8th of the new variance we just saw */
7590 tp->t_rttvar += (delta >> 3);
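/*
 * Worked example (illustrative numbers only): with t_srtt = 80000
 * usec, t_rttvar = 20000 usec and a new sample rtt = 40000 usec:
 *   delta    = 80000 - 40000        = 40000
 *   t_srtt   = 80000 - 10000 + 5000 = 75000 usec
 *   t_rttvar = 20000 - 2500  + 5000 = 22500 usec
 */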
7591 if (tp->t_rttvar <= 0)
7595 * No rtt measurement yet - use the unsmoothed rtt. Set the
7596 * variance to half the rtt (so our first retransmit happens
7600 tp->t_rttvar = rtt >> 1;
7602 rack->rc_srtt_measure_made = 1;
7603 KMOD_TCPSTAT_INC(tcps_rttupdated);
7604 if (tp->t_rttupdated < UCHAR_MAX)
7607 if (rack_stats_gets_ms_rtt == 0) {
7608 /* Send in the microsecond rtt used for rxt timeout purposes */
7609 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt));
7610 } else if (rack_stats_gets_ms_rtt == 1) {
7611 /* Send in the millisecond rtt used for rxt timeout purposes */
7615 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
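/* i.e. round the microsecond value up to the next whole millisecond. */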
7616 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
7617 } else if (rack_stats_gets_ms_rtt == 2) {
7618 /* Send in the millisecond rtt as close to the path RTT as we can get */
7622 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
7623 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
7625 /* Send in the microsecond rtt as close to the path RTT as we can get */
7626 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt));
7631 * the retransmit should happen at rtt + 4 * rttvar. Because of the
7632 * way we do the smoothing, srtt and rttvar will each average +1/2
7633 * tick of bias. When we compute the retransmit timer, we want 1/2
7634 * tick of rounding and 1 extra tick because of +-1/2 tick
7635 * uncertainty in the firing of the timer. The bias will give us
7636 * exactly the 1.5 tick we need. But, because the bias is
7637 * statistical, we have to test that we don't drop below the minimum
7638 * feasible timer (which is 2 ticks).
7641 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
7642 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop);
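/*
 * Illustration: continuing the example above (srtt 75000 usec,
 * rttvar 22500 usec), the nominal rexmt value is roughly
 * 75000 + 4 * 22500 = 165000 usec, which the RANGESET above then
 * bounds by max(rack_rto_min, rtt + 2) and rack_rto_max, allowing
 * for the configured timer slop.
 */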
7643 rack_log_rtt_sample(rack, rtt);
7644 tp->t_softerror = 0;
7649 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts)
7652 * Apply to filter the inbound us-rtt at us_cts.
7656 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
7657 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt,
7659 if (old_rtt > us_rtt) {
7660 /* We just hit a new lower rtt time */
7661 rack_log_rtt_shrinks(rack, us_cts, old_rtt,
7662 __LINE__, RACK_RTTS_NEWRTT);
7664 * Only count it if it's lower than what we saw within our
7667 if ((old_rtt - us_rtt) > rack_min_rtt_movement) {
7668 if (rack_probertt_lower_within &&
7669 rack->rc_gp_dyn_mul &&
7670 (rack->use_fixed_rate == 0) &&
7671 (rack->rc_always_pace)) {
7673 * We are seeing a new lower rtt very close
7674 * to the time that we would have entered probe-rtt.
7675 * This is probably due to the fact that a peer flow
7676 * has entered probe-rtt. Let's go in now too.
7680 val = rack_probertt_lower_within * rack_time_between_probertt;
7682 if ((rack->in_probe_rtt == 0) &&
7683 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) {
7684 rack_enter_probertt(rack, us_cts);
7687 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
7693 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
7694 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack)
7698 uint32_t t, len_acked;
7700 if ((rsm->r_flags & RACK_ACKED) ||
7701 (rsm->r_flags & RACK_WAS_ACKED))
7704 if (rsm->r_no_rtt_allowed) {
7708 if (ack_type == CUM_ACKED) {
7709 if (SEQ_GT(th_ack, rsm->r_end)) {
7710 len_acked = rsm->r_end - rsm->r_start;
7713 len_acked = th_ack - rsm->r_start;
7717 len_acked = rsm->r_end - rsm->r_start;
7720 if (rsm->r_rtr_cnt == 1) {
7722 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
7725 if (!tp->t_rttlow || tp->t_rttlow > t)
7727 if (!rack->r_ctl.rc_rack_min_rtt ||
7728 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7729 rack->r_ctl.rc_rack_min_rtt = t;
7730 if (rack->r_ctl.rc_rack_min_rtt == 0) {
7731 rack->r_ctl.rc_rack_min_rtt = 1;
7734 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]))
7735 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
7737 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
7740 if (CC_ALGO(tp)->rttsample != NULL) {
7741 /* Kick the RTT to the CC */
7742 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas);
7744 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
7745 if (ack_type == SACKED) {
7746 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1);
7747 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt);
7750 * We need to set up what our confidence
7753 * If the rsm was app limited and it is
7754 * less than a mss in length (the end
7755 * of the send) then we have a gap. If we
7756 * were app limited but say we were sending
7757 * multiple MSS's then we are more confident
7760 * When we are not app-limited then we see if
7761 * the rsm is being included in the current
7762 * measurement, we tell this by the app_limited_needs_set
7765 * Note that being cwnd blocked is not applimited
7766 * as well as the pacing delay between packets which
7767 * are sending only 1 or 2 MSS's also will show up
7768 * in the RTT. We probably need to examine this algorithm
7769 * a bit more and enhance it to account for the delay
7770 * between rsm's. We could do that by saving off the
7771 * pacing delay of each rsm (in an rsm) and then
7772 * factoring that in somehow though for now I am
7777 if (rsm->r_flags & RACK_APP_LIMITED) {
7778 if (all && (len_acked <= ctf_fixed_maxseg(tp)))
7782 } else if (rack->app_limited_needs_set == 0) {
7787 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2);
7788 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt,
7789 calc_conf, rsm, rsm->r_rtr_cnt);
7791 if ((rsm->r_flags & RACK_TLP) &&
7792 (!IN_FASTRECOVERY(tp->t_flags))) {
7793 /* Segment was a TLP and our retrans matched */
7794 if (rack->r_ctl.rc_tlp_cwnd_reduce) {
7795 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
7798 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
7799 /* New more recent rack_tmit_time */
7800 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
7801 rack->rc_rack_rtt = t;
7806 * We clear the soft/rxtshift since we got an ack.
7807 * There is no assurance we will call the commit() function
7808 * so we need to clear these to avoid incorrect handling.
7811 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
7812 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
7813 tp->t_softerror = 0;
7814 if (to && (to->to_flags & TOF_TS) &&
7815 (ack_type == CUM_ACKED) &&
7817 ((rsm->r_flags & RACK_OVERMAX) == 0)) {
7819 * Now which timestamp does it match? In this block the ACK
7820 * must be coming from a previous transmission.
7822 for (i = 0; i < rsm->r_rtr_cnt; i++) {
7823 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) {
7824 t = cts - (uint32_t)rsm->r_tim_lastsent[i];
7827 if (CC_ALGO(tp)->rttsample != NULL) {
7829 * Kick the RTT to the CC, here
7830 * we lie a bit in that we know the
7831 * retransmission is correct even though
7832 * we retransmitted. This is because
7833 * we match the timestamps.
7835 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i]))
7836 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i];
7838 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i];
7839 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas);
7841 if ((i + 1) < rsm->r_rtr_cnt) {
7843 * The peer ack'd from our previous
7844 * transmission. We have a spurious
7845 * retransmission and thus we don't
7846 * want to update our rack_rtt.
7848 * Hmm should there be a CC revert here?
7853 if (!tp->t_rttlow || tp->t_rttlow > t)
7855 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7856 rack->r_ctl.rc_rack_min_rtt = t;
7857 if (rack->r_ctl.rc_rack_min_rtt == 0) {
7858 rack->r_ctl.rc_rack_min_rtt = 1;
7861 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
7862 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
7863 /* New more recent rack_tmit_time */
7864 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
7865 rack->rc_rack_rtt = t;
7867 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3);
7868 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm,
7876 * Ok it's a SACK block that we retransmitted, or a Windows
7877 * machine without timestamps. We can tell nothing from the
7878 * time-stamp since it's not there, or the time the peer last
7879 * received a segment that moved forward its cum-ack point.
7882 i = rsm->r_rtr_cnt - 1;
7883 t = cts - (uint32_t)rsm->r_tim_lastsent[i];
7886 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7888 * We retransmitted and the ack came back in less
7889 * than the smallest rtt we have observed. We most
7890 * likely did an improper retransmit as outlined in
7891 * 6.2 Step 2 point 2 in the rack-draft so we
7892 * don't want to update our rack_rtt. We in
7893 * theory (in future) might want to think about reverting our
7894 * cwnd state but we won't for now.
7897 } else if (rack->r_ctl.rc_rack_min_rtt) {
7899 * We retransmitted it and the retransmit did the
7902 if (!rack->r_ctl.rc_rack_min_rtt ||
7903 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7904 rack->r_ctl.rc_rack_min_rtt = t;
7905 if (rack->r_ctl.rc_rack_min_rtt == 0) {
7906 rack->r_ctl.rc_rack_min_rtt = 1;
7909 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[i])) {
7910 /* New more recent rack_tmit_time */
7911 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i];
7912 rack->rc_rack_rtt = t;
7921 * Mark the SACK_PASSED flag on all entries prior to rsm send wise.
7924 rack_log_sack_passed(struct tcpcb *tp,
7925 struct tcp_rack *rack, struct rack_sendmap *rsm)
7927 struct rack_sendmap *nrsm;
7930 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap,
7931 rack_head, r_tnext) {
7933 /* Skip the original segment, it is acked */
7936 if (nrsm->r_flags & RACK_ACKED) {
7938 * Skip ack'd segments, though we
7939 * should not see these, since tmap
7940 * should not have ack'd segments.
7944 if (nrsm->r_flags & RACK_RWND_COLLAPSED) {
7946 * If the peer dropped the rwnd on
7947 * these then we don't worry about them.
7951 if (nrsm->r_flags & RACK_SACK_PASSED) {
7953 * We found one that is already marked
7954 * passed, we have been here before and
7955 * so all others below this are marked.
7959 nrsm->r_flags |= RACK_SACK_PASSED;
7960 nrsm->r_flags &= ~RACK_WAS_SACKPASS;
7965 rack_need_set_test(struct tcpcb *tp,
7966 struct tcp_rack *rack,
7967 struct rack_sendmap *rsm,
7973 if ((tp->t_flags & TF_GPUTINPROG) &&
7974 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
7976 * We were app limited, and this ack
7977 * butts up or goes beyond the point where we want
7978 * to start our next measurement. We need
7979 * to record the new gput_ts as here and
7980 * possibly update the start sequence.
7984 if (rsm->r_rtr_cnt > 1) {
7986 * This is a retransmit, can we
7987 * really make any assessment at this
7988 * point? We are not really sure of
7989 * the timestamp, is it this or the
7990 * previous transmission?
7992 * Lets wait for something better that
7993 * is not retransmitted.
7999 rack->app_limited_needs_set = 0;
8000 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
8001 /* Do we start at a new end? */
8002 if ((use_which == RACK_USE_BEG) &&
8003 SEQ_GEQ(rsm->r_start, tp->gput_seq)) {
8005 * When we get an ACK that just eats
8006 * up some of the rsm, we set RACK_USE_BEG
8007 * since what's at r_start (i.e. th_ack)
8008 * is left unacked and that's where the
8009 * measurement now starts.
8011 tp->gput_seq = rsm->r_start;
8012 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8014 if ((use_which == RACK_USE_END) &&
8015 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
8017 * We use the end when the cumack
8018 * is moving forward and completely
8019 * deleting the rsm passed so basically
8020 * r_end holds th_ack.
8022 * For SACK's we also want to use the end
8023 * since this piece just got sacked and
8024 * we want to target anything after that
8025 * in our measurement.
8027 tp->gput_seq = rsm->r_end;
8028 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8030 if (use_which == RACK_USE_END_OR_THACK) {
8032 * special case for ack moving forward,
8033 * not a sack, we need to move all the
8034 * way up to where this ack cum-ack moves
8037 if (SEQ_GT(th_ack, rsm->r_end))
8038 tp->gput_seq = th_ack;
8040 tp->gput_seq = rsm->r_end;
8041 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
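/*
 * To summarize the three cases above: RACK_USE_BEG re-anchors the
 * measurement at the still-unacked start of the rsm, RACK_USE_END
 * just past the piece that was acked/sacked, and
 * RACK_USE_END_OR_THACK at whichever of r_end or th_ack is
 * further along.
 */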
8043 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) {
8045 * We moved beyond this guy's range, re-calculate
8046 * the new end point.
8048 if (rack->rc_gp_filled == 0) {
8049 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
8051 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
8055 * We are moving the goal post, we may be able to clear the
8056 * measure_saw_probe_rtt flag.
8058 if ((rack->in_probe_rtt == 0) &&
8059 (rack->measure_saw_probe_rtt) &&
8060 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
8061 rack->measure_saw_probe_rtt = 0;
8062 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts,
8063 seq, tp->gput_seq, 0, 5, line, NULL, 0);
8064 if (rack->rc_gp_filled &&
8065 ((tp->gput_ack - tp->gput_seq) <
8066 max(rc_init_window(rack), (MIN_GP_WIN *
8067 ctf_fixed_maxseg(tp))))) {
8068 uint32_t ideal_amount;
8070 ideal_amount = rack_get_measure_window(tp, rack);
8071 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) {
8073 * There is no sense of continuing this measurement
8074 * because it's too small to gain us anything we
8075 * trust. Skip it and that way we can start a new
8076 * measurement quicker.
8078 tp->t_flags &= ~TF_GPUTINPROG;
8079 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq,
8080 0, 0, 0, 6, __LINE__, NULL, 0);
8083 * Reset the window further out.
8085 tp->gput_ack = tp->gput_seq + ideal_amount;
8092 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm)
8094 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) {
8095 /* Behind our TLP definition or right at */
8098 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) {
8099 /* The start is beyond or right at our end of TLP definition */
8102 /* It has to be a sub-part of the original TLP recorded */
8108 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack,
8109 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two)
8111 uint32_t start, end, changed = 0;
8112 struct rack_sendmap stack_map;
8113 struct rack_sendmap *rsm, *nrsm, fe, *prev, *next;
8115 struct rack_sendmap *insret;
8117 int32_t used_ref = 1;
8120 start = sack->start;
8123 memset(&fe, 0, sizeof(fe));
8125 if ((rsm == NULL) ||
8126 (SEQ_LT(end, rsm->r_start)) ||
8127 (SEQ_GEQ(start, rsm->r_end)) ||
8128 (SEQ_LT(start, rsm->r_start))) {
8130 * We are not in the right spot,
8131 * find the correct spot in the tree.
8135 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
8142 /* Ok we have an ACK for some piece of this rsm */
8143 if (rsm->r_start != start) {
8144 if ((rsm->r_flags & RACK_ACKED) == 0) {
8146 * Before any splitting or hookery is
8147 * done, is it a TLP of interest, i.e. a rxt?
8149 if ((rsm->r_flags & RACK_TLP) &&
8150 (rsm->r_rtr_cnt > 1)) {
8152 * We are splitting a rxt TLP, check
8153 * if we need to save off the start/end
8155 if (rack->rc_last_tlp_acked_set &&
8156 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
8158 * We already turned this on since we are inside;
8159 * the previous one was partially sacked, now we
8160 * are getting another one (maybe all of it).
8163 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
8165 * Let's make sure we have all of it though.
8167 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
8168 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8169 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8170 rack->r_ctl.last_tlp_acked_end);
8172 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
8173 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8174 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8175 rack->r_ctl.last_tlp_acked_end);
8178 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8179 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8180 rack->rc_last_tlp_past_cumack = 0;
8181 rack->rc_last_tlp_acked_set = 1;
8182 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
8186 * Need to split this in two pieces the before and after,
8187 * the before remains in the map, the after must be
8188 * added. In other words we have:
8189 * rsm |--------------|
8193 * and nrsm will be the sacked piece
8196 * But before we start down that path lets
8197 * see if the sack spans over on top of
8198 * the next guy and it is already sacked.
8201 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8202 if (next && (next->r_flags & RACK_ACKED) &&
8203 SEQ_GEQ(end, next->r_start)) {
8205 * So the next one is already acked, and
8206 * we can thus by hookery use our stack_map
8207 * to reflect the piece being sacked and
8208 * then adjust the two tree entries moving
8209 * the start and ends around. So we start like:
8210 * rsm |------------| (not-acked)
8211 * next |-----------| (acked)
8212 * sackblk |-------->
8213 * We want to end like so:
8214 * rsm |------| (not-acked)
8215 * next |-----------------| (acked)
8217 * Where nrsm is a temporary stack piece we
8218 * use to update all the gizmos.
8220 /* Copy up our fudge block */
8222 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
8223 /* Now adjust our tree blocks */
8225 next->r_start = start;
8226 /* Now we must adjust back where next->m is */
8227 rack_setup_offset_for_rsm(rsm, next);
8229 /* We don't need to adjust rsm, it did not change */
8230 /* Clear out the dup ack count of the remainder */
8232 rsm->r_just_ret = 0;
8233 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8234 /* Now let's make sure our fudge block is right */
8235 nrsm->r_start = start;
8236 /* Now let's update all the stats and such */
8237 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
8238 if (rack->app_limited_needs_set)
8239 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
8240 changed += (nrsm->r_end - nrsm->r_start);
8241 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
8242 if (nrsm->r_flags & RACK_SACK_PASSED) {
8243 rack->r_ctl.rc_reorder_ts = cts;
8246 * Now we want to go up from rsm (the
8247 * one left un-acked) to the next one
8248 * in the tmap. We do this so when
8249 * we walk backwards we include marking
8250 * sack-passed on rsm (The one passed in
8251 * is skipped since it is generally called
8252 * on something sacked before removing it
8255 if (rsm->r_in_tmap) {
8256 nrsm = TAILQ_NEXT(rsm, r_tnext);
8258 * Now that we have the next
8259 * one walk backwards from there.
8261 if (nrsm && nrsm->r_in_tmap)
8262 rack_log_sack_passed(tp, rack, nrsm);
8264 /* Now are we done? */
8265 if (SEQ_LT(end, next->r_end) ||
8266 (end == next->r_end)) {
8267 /* Done with block */
8270 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__);
8271 counter_u64_add(rack_sack_used_next_merge, 1);
8272 /* Position for the next block */
8273 start = next->r_end;
8274 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next);
8279 * We can't use any hookery here, so we
8280 * need to split the map. We enter like
8284 * We will add the new block nrsm and
8285 * that will be the new portion, and then
8286 * fall through after resetting rsm. So we
8287 * split and look like this:
8291 * We then fall through resetting
8292 * rsm to nrsm, so the next block
8295 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
8298 * failed XXXrrs what can we do but lose the sack
8303 counter_u64_add(rack_sack_splits, 1);
8304 rack_clone_rsm(rack, nrsm, rsm, start);
8305 rsm->r_just_ret = 0;
8307 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
8309 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
8310 if (insret != NULL) {
8311 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
8312 nrsm, insret, rack, rsm);
8315 if (rsm->r_in_tmap) {
8316 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8317 nrsm->r_in_tmap = 1;
8319 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__);
8320 rsm->r_flags &= (~RACK_HAS_FIN);
8321 /* Position us to point to the new nrsm that starts the sack blk */
8325 /* Already sacked this piece */
8326 counter_u64_add(rack_sack_skipped_acked, 1);
8328 if (end == rsm->r_end) {
8329 /* Done with block */
8330 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8332 } else if (SEQ_LT(end, rsm->r_end)) {
8333 /* A partial sack to an already sacked block */
8335 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8339 * The end goes beyond this guy,
8340 * reposition the start to the
8344 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8350 if (SEQ_GEQ(end, rsm->r_end)) {
8352 * The end of this block is either beyond this guy or right
8353 * at this guy. I.e.:
8359 if ((rsm->r_flags & RACK_ACKED) == 0) {
8361 * Is it a TLP of interest?
8363 if ((rsm->r_flags & RACK_TLP) &&
8364 (rsm->r_rtr_cnt > 1)) {
8366 * We are splitting a rxt TLP, check
8367 * if we need to save off the start/end
8369 if (rack->rc_last_tlp_acked_set &&
8370 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
8372 * We already turned this on since we are inside;
8373 * the previous one was partially sacked, now we
8374 * are getting another one (maybe all of it).
8376 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
8378 * Let's make sure we have all of it though.
8380 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
8381 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8382 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8383 rack->r_ctl.last_tlp_acked_end);
8385 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
8386 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8387 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8388 rack->r_ctl.last_tlp_acked_end);
8391 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8392 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8393 rack->rc_last_tlp_past_cumack = 0;
8394 rack->rc_last_tlp_acked_set = 1;
8395 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
8398 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
8399 changed += (rsm->r_end - rsm->r_start);
8400 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
8401 if (rsm->r_in_tmap) /* should be true */
8402 rack_log_sack_passed(tp, rack, rsm);
8403 /* Is reordering occurring? */
8404 if (rsm->r_flags & RACK_SACK_PASSED) {
8405 rsm->r_flags &= ~RACK_SACK_PASSED;
8406 rack->r_ctl.rc_reorder_ts = cts;
8408 if (rack->app_limited_needs_set)
8409 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
8410 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
8411 rsm->r_flags |= RACK_ACKED;
8412 if (rsm->r_in_tmap) {
8413 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8416 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__);
8418 counter_u64_add(rack_sack_skipped_acked, 1);
8421 if (end == rsm->r_end) {
8422 /* This block only - done, setup for next */
8426 * There is more not covered by this rsm, move on
8427 * to the next block in the RB tree.
8429 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8437 * The end of this sack block is smaller than
8442 if ((rsm->r_flags & RACK_ACKED) == 0) {
8444 * Is it a TLP of interest?
8446 if ((rsm->r_flags & RACK_TLP) &&
8447 (rsm->r_rtr_cnt > 1)) {
8449 * We are splitting a rxt TLP, check
8450 * if we need to save off the start/end
8452 if (rack->rc_last_tlp_acked_set &&
8453 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
8455 * We already turned this on since we are inside
8456 * the previous one was a partially sack now we
8457 * are getting another one (maybe all of it).
8459 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
8461 * Lets make sure we have all of it though.
8463 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
8464 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8465 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8466 rack->r_ctl.last_tlp_acked_end);
8468 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
8469 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8470 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8471 rack->r_ctl.last_tlp_acked_end);
8474 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8475 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8476 rack->rc_last_tlp_past_cumack = 0;
8477 rack->rc_last_tlp_acked_set = 1;
8478 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
8481 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8483 (prev->r_flags & RACK_ACKED)) {
8485 * Goal, we want the right remainder of rsm to shrink
8486 * in place and span from (rsm->r_start = end) to rsm->r_end.
8487 * We want to expand prev to go all the way
8488 * to prev->r_end <- end.
8489 * so in the tree we have before:
8490 * prev |--------| (acked)
8491 * rsm |-------| (non-acked)
8493 * We churn it so we end up with
8494 * prev |----------| (acked)
8495 * rsm |-----| (non-acked)
8496 * nrsm |-| (temporary)
8498 * Note if either prev/rsm is a TLP we don't
8502 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
8505 /* Now adjust nrsm (stack copy) to be
8506 * the one that is the small
8507 * piece that was "sacked".
8511 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8513 * Now that the rsm has had its start moved forward
8514 * let's go ahead and get its new place in the world.
8516 rack_setup_offset_for_rsm(prev, rsm);
8518 * Now nrsm is our new little piece
8519 * that is acked (which was merged
8520 * to prev). Update the rtt and changed
8521 * based on that. Also check for reordering.
8523 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
8524 if (rack->app_limited_needs_set)
8525 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
8526 changed += (nrsm->r_end - nrsm->r_start);
8527 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
8528 if (nrsm->r_flags & RACK_SACK_PASSED) {
8529 rack->r_ctl.rc_reorder_ts = cts;
8531 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__);
8533 counter_u64_add(rack_sack_used_prev_merge, 1);
8536 * This is the case where our previous
8537 * block is not acked either, so we must
8538 * split the block in two.
8540 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
8542 /* failed rrs what can we do but lose the sack info? */
8545 if ((rsm->r_flags & RACK_TLP) &&
8546 (rsm->r_rtr_cnt > 1)) {
8548 * We are splitting a rxt TLP, check
8549 * if we need to save off the start/end
8551 if (rack->rc_last_tlp_acked_set &&
8552 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
8554 * We already turned this on since this block is inside;
8555 * the previous one was partially sacked, now we
8556 * are getting another one (maybe all of it).
8558 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
8560 * Let's make sure we have all of it though.
8562 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
8563 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8564 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8565 rack->r_ctl.last_tlp_acked_end);
8567 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
8568 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8569 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8570 rack->r_ctl.last_tlp_acked_end);
8573 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8574 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8575 rack->rc_last_tlp_acked_set = 1;
8576 rack->rc_last_tlp_past_cumack = 0;
8577 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
8581 * In this case nrsm becomes
8582 * nrsm->r_start = end;
8583 * nrsm->r_end = rsm->r_end;
8584 * which is un-acked.
8586 * rsm->r_end = nrsm->r_start;
8587 * i.e. the remaining un-acked
8588 * piece is left on the left
8591 * So we start like this
8592 * rsm |----------| (not acked)
8594 * build it so we have
8596 * nrsm |------| (not acked)
8598 counter_u64_add(rack_sack_splits, 1);
8599 rack_clone_rsm(rack, nrsm, rsm, end);
8600 rsm->r_flags &= (~RACK_HAS_FIN);
8601 rsm->r_just_ret = 0;
8603 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
8605 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
8606 if (insret != NULL) {
8607 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
8608 nrsm, insret, rack, rsm);
8611 if (rsm->r_in_tmap) {
8612 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8613 nrsm->r_in_tmap = 1;
8616 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
8617 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
8618 changed += (rsm->r_end - rsm->r_start);
8619 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
8620 if (rsm->r_in_tmap) /* should be true */
8621 rack_log_sack_passed(tp, rack, rsm);
8622 /* Is reordering occurring? */
8623 if (rsm->r_flags & RACK_SACK_PASSED) {
8624 rsm->r_flags &= ~RACK_SACK_PASSED;
8625 rack->r_ctl.rc_reorder_ts = cts;
8627 if (rack->app_limited_needs_set)
8628 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
8629 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
8630 rsm->r_flags |= RACK_ACKED;
8631 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__);
8632 if (rsm->r_in_tmap) {
8633 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8637 } else if (start != end) {
8639 * The block was already acked.
8641 counter_u64_add(rack_sack_skipped_acked, 1);
8646 ((rsm->r_flags & RACK_TLP) == 0) &&
8647 (rsm->r_flags & RACK_ACKED)) {
8649 * Now can we merge where we worked
8650 * with either the previous or
8653 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8655 if (next->r_flags & RACK_TLP)
8657 if (next->r_flags & RACK_ACKED) {
8658 /* yep this and next can be merged */
8659 rsm = rack_merge_rsm(rack, rsm, next);
8660 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8664 /* Now what about the previous? */
8665 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8667 if (prev->r_flags & RACK_TLP)
8669 if (prev->r_flags & RACK_ACKED) {
8670 /* yep the previous and this can be merged */
8671 rsm = rack_merge_rsm(rack, prev, rsm);
8672 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8677 if (used_ref == 0) {
8678 counter_u64_add(rack_sack_proc_all, 1);
8680 counter_u64_add(rack_sack_proc_short, 1);
8682 /* Save off the next one for quick reference. */
8684 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8687 *prsm = rack->r_ctl.rc_sacklast = nrsm;
8688 /* Pass back the moved. */
8694 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack)
8696 struct rack_sendmap *tmap;
8699 while (rsm && (rsm->r_flags & RACK_ACKED)) {
8700 /* It's no longer sacked, mark it so */
8701 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
8703 if (rsm->r_in_tmap) {
8704 panic("rack:%p rsm:%p flags:0x%x in tmap?",
8705 rack, rsm, rsm->r_flags);
8708 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS);
8709 /* Rebuild it into our tmap */
8711 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8714 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext);
8717 tmap->r_in_tmap = 1;
8718 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8721 * Now let's possibly clear the sack filter so we start
8722 * recognizing sacks that cover this area.
8724 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack);
8729 rack_do_decay(struct tcp_rack *rack)
8733 #define timersub(tvp, uvp, vvp) \
8735 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
8736 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
8737 if ((vvp)->tv_usec < 0) { \
8739 (vvp)->tv_usec += 1000000; \
8743 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res);
8746 rack->r_ctl.input_pkt++;
8747 if ((rack->rc_in_persist) ||
8748 (res.tv_sec >= 1) ||
8749 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) {
8751 * Check for decay of non-SAD,
8752 * we want all SAD detection metrics to
8753 * decay 1/4 per second (or more) passed.
8755 #ifdef NETFLIX_EXP_DETECTION
8758 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt;
8760 /* Update our saved tracking values */
8761 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt;
8762 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
8763 /* Now do we escape without decay? */
8764 #ifdef NETFLIX_EXP_DETECTION
8765 if (rack->rc_in_persist ||
8766 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) ||
8767 (pkt_delta < tcp_sad_low_pps)){
8769 * We don't decay idle connections
8770 * or ones that have a low input pps.
8774 /* Decay the counters */
8775 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count,
8777 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count,
8779 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra,
8781 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move,
8788 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to)
8790 struct rack_sendmap *rsm;
8792 struct rack_sendmap *rm;
8796 * The ACK point is advancing to th_ack, we must drop off
8797 * the packets in the rack log and calculate any eligible
8800 rack->r_wanted_output = 1;
8802 /* Tend any TLP that has been marked for 1/2 the seq space (it's old) */
8803 if ((rack->rc_last_tlp_acked_set == 1)&&
8804 (rack->rc_last_tlp_past_cumack == 1) &&
8805 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) {
8807 * We have reached the point where our last rack
8808 * tlp retransmit sequence is ahead of the cum-ack.
8809 * This can only happen when the cum-ack moves all
8810 * the way around (it's been a full 2^31 + 1 bytes
8811 * or more since we sent a retransmitted TLP). Let's
8812 * turn off the valid flag since it's not really valid.
8814 * Note since sacks also turn on this event we have
8815 * a complication, we have to wait to age it out until
8816 * the cum-ack is past the TLP before checking, which is
8817 * what the next else clause does.
8819 rack_log_dsack_event(rack, 9, __LINE__,
8820 rack->r_ctl.last_tlp_acked_start,
8821 rack->r_ctl.last_tlp_acked_end);
8822 rack->rc_last_tlp_acked_set = 0;
8823 rack->rc_last_tlp_past_cumack = 0;
8824 } else if ((rack->rc_last_tlp_acked_set == 1) &&
8825 (rack->rc_last_tlp_past_cumack == 0) &&
8826 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) {
8828 * It is safe to start aging TLPs out.
8830 rack->rc_last_tlp_past_cumack = 1;
8832 /* We do the same for the tlp send seq as well */
8833 if ((rack->rc_last_sent_tlp_seq_valid == 1) &&
8834 (rack->rc_last_sent_tlp_past_cumack == 1) &&
8835 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) {
8836 rack_log_dsack_event(rack, 9, __LINE__,
8837 rack->r_ctl.last_sent_tlp_seq,
8838 (rack->r_ctl.last_sent_tlp_seq +
8839 rack->r_ctl.last_sent_tlp_len));
8840 rack->rc_last_sent_tlp_seq_valid = 0;
8841 rack->rc_last_sent_tlp_past_cumack = 0;
8842 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) &&
8843 (rack->rc_last_sent_tlp_past_cumack == 0) &&
8844 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) {
8846 * It is safe to start aging the TLP send seq.
8848 rack->rc_last_sent_tlp_past_cumack = 1;
8851 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
8853 if ((th_ack - 1) == tp->iss) {
8855 * For the SYN incoming case we will not
8856 * have called tcp_output for the sending of
8857 * the SYN, so there will be no map. All
8858 * other cases should probably be a panic.
8862 if (tp->t_flags & TF_SENTFIN) {
8863 /* if we sent a FIN we often will not have a map */
8867 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n",
8869 tp->t_state, th_ack, rack,
8870 tp->snd_una, tp->snd_max, tp->snd_nxt);
8874 if (SEQ_LT(th_ack, rsm->r_start)) {
8875 /* Huh map is missing this */
8877 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n",
8879 th_ack, tp->t_state, rack->r_state);
8883 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack);
8885 /* Now was it a retransmitted TLP? */
8886 if ((rsm->r_flags & RACK_TLP) &&
8887 (rsm->r_rtr_cnt > 1)) {
8889 * Yes, this rsm was a TLP and retransmitted, remember that
8890 * since if a DSACK comes back on this we don't want
8891 * to think of it as a reordered segment. This may
8892 * get updated again with possibly even other TLPs
8893 * in flight, but that's ok. Only when we don't send
8894 * a retransmitted TLP for 1/2 the sequence space
8895 * will it get turned off (above).
8897 if (rack->rc_last_tlp_acked_set &&
8898 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
8900 * We already turned this on since the end matches;
8901 * the previous one was partially acked, now we
8902 * are getting another one (maybe all of it).
8904 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
8906 * Let's make sure we have all of it though.
8908 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
8909 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8910 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8911 rack->r_ctl.last_tlp_acked_end);
8913 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
8914 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8915 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
8916 rack->r_ctl.last_tlp_acked_end);
8919 rack->rc_last_tlp_past_cumack = 1;
8920 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
8921 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
8922 rack->rc_last_tlp_acked_set = 1;
8923 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
8926 /* Now do we consume the whole thing? */
8927 if (SEQ_GEQ(th_ack, rsm->r_end)) {
8928 /* It's all consumed. */
8930 uint8_t newly_acked;
8932 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__);
8933 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
8934 rsm->r_rtr_bytes = 0;
8935 /* Record the time of highest cumack sent */
8936 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8938 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8940 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8942 panic("removing head in rack:%p rsm:%p rm:%p",
8946 if (rsm->r_in_tmap) {
8947 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8951 if (rsm->r_flags & RACK_ACKED) {
8953 * It was acked on the scoreboard -- remove
8956 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
8958 } else if (rsm->r_flags & RACK_SACK_PASSED) {
8960 * There are segments ACKED on the
8961 * scoreboard further up. We are seeing
8964 rsm->r_flags &= ~RACK_SACK_PASSED;
8965 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
8966 rsm->r_flags |= RACK_ACKED;
8967 rack->r_ctl.rc_reorder_ts = cts;
8968 if (rack->r_ent_rec_ns) {
8970 * We have sent no more, and we saw a sack
8973 rack->r_might_revert = 1;
8976 if ((rsm->r_flags & RACK_TO_REXT) &&
8977 (tp->t_flags & TF_RCVD_TSTMP) &&
8978 (to->to_flags & TOF_TS) &&
8979 (to->to_tsecr != 0) &&
8980 (tp->t_flags & TF_PREVVALID)) {
8982 * We can use the timestamp to see
8983 * if this retransmission was from the
8984 * first transmit. If so we made a mistake.
8986 tp->t_flags &= ~TF_PREVVALID;
8987 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) {
8988 /* The first transmit is what this ack is for */
8989 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__);
8992 left = th_ack - rsm->r_end;
8993 if (rack->app_limited_needs_set && newly_acked)
8994 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK);
8995 /* Free back to zone */
8996 rack_free(rack, rsm);
9000 /* Check for reneging */
9001 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
9002 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) {
9004 * The peer has moved snd_una up to
9005 * the edge of this send, i.e. one
9006 * that it had previously acked. The only
9007 * way that can be true is if the peer threw
9008 * away data (space issues) that it had
9009 * previously sacked (else it would have
9010 * given us snd_una up to rsm->r_end).
9011 * We need to undo the acked markings here.
9013 * Note we have to look to make sure th_ack is
9014 * our rsm->r_start in case we get an old ack
9015 * where th_ack is behind snd_una.
9017 rack_peer_reneges(rack, rsm, th_ack);
9021 if (rsm->r_flags & RACK_ACKED) {
9023 * It was acked on the scoreboard -- remove it from
9024 * total for the part being cum-acked.
9026 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
9029 * Clear the dup ack count for
9030 * the piece that remains.
9033 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
9034 if (rsm->r_rtr_bytes) {
9036 * It was retransmitted, adjust the
9037 * sack holes for what was acked.
9041 ack_am = (th_ack - rsm->r_start);
9042 if (ack_am >= rsm->r_rtr_bytes) {
9043 rack->r_ctl.rc_holes_rxt -= ack_am;
9044 rsm->r_rtr_bytes -= ack_am;
9048 * Update where the piece starts and record
9049 * the time of send of highest cumack sent.
9051 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
9052 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__);
9053 /* Now we need to move our offset forward too */
9054 if (rsm->m && (rsm->orig_m_len != rsm->m->m_len)) {
9055 /* Fix up the orig_m_len and possibly the mbuf offset */
9056 rack_adjust_orig_mlen(rsm);
9058 rsm->soff += (th_ack - rsm->r_start);
9059 rsm->r_start = th_ack;
9060 /* Now do we need to move the mbuf fwd too? */
9062 while (rsm->soff >= rsm->m->m_len) {
9063 rsm->soff -= rsm->m->m_len;
9064 rsm->m = rsm->m->m_next;
9065 KASSERT((rsm->m != NULL),
9066 (" nrsm:%p hit at soff:%u null m",
9069 rsm->orig_m_len = rsm->m->m_len;
9071 if (rack->app_limited_needs_set)
9072 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG);
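/*
 * In short, for a partial cum-ack the surviving rsm keeps only its
 * tail: both r_start and the cached socket-buffer offset (soff)
 * advance by the acked amount, and if soff walks past the end of the
 * current mbuf we step rsm->m forward through the chain until soff
 * again lands inside a single mbuf (illustrative restatement of the
 * code above).
 */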
9076 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack)
9078 struct rack_sendmap *rsm;
9079 int sack_pass_fnd = 0;
9081 if (rack->r_might_revert) {
9083 * Ok we have reordering, have not sent anything, we
9084 * might want to revert the congestion state if nothing
9085 * further has SACK_PASSED on it. Let's check.
9087 * We also get here when we have DSACKs come in for
9088 * all the data that we FR'd. Note that an rxt or tlp
9089 * timer prevents this from happening.
9092 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
9093 if (rsm->r_flags & RACK_SACK_PASSED) {
9098 if (sack_pass_fnd == 0) {
9100 * We went into recovery
9101 * incorrectly due to reordering!
9105 rack->r_ent_rec_ns = 0;
9106 orig_cwnd = tp->snd_cwnd;
9107 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec;
9108 tp->snd_recover = tp->snd_una;
9109 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__);
9110 EXIT_RECOVERY(tp->t_flags);
9112 rack->r_might_revert = 0;
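/*
 * In other words: if no outstanding rsm still carries SACK_PASSED,
 * the earlier entry into recovery is treated as spurious (caused by
 * reordering), so ssthresh is put back to the value captured at
 * recovery entry, snd_recover is pulled down to snd_una, and we
 * leave recovery.
 */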
9116 #ifdef NETFLIX_EXP_DETECTION
9118 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz)
9120 if ((rack->do_detection || tcp_force_detection) &&
9121 tcp_sack_to_ack_thresh &&
9122 tcp_sack_to_move_thresh &&
9123 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) {
9125 * We have thresholds set to find
9126 * possible attackers and disable sack.
9129 uint64_t ackratio, moveratio, movetotal;
9132 rack_log_sad(rack, 1);
9133 ackratio = (uint64_t)(rack->r_ctl.sack_count);
9134 ackratio *= (uint64_t)(1000);
9135 if (rack->r_ctl.ack_count)
9136 ackratio /= (uint64_t)(rack->r_ctl.ack_count);
9138 /* We really should not hit here */
9141 if ((rack->sack_attack_disable == 0) &&
9142 (ackratio > rack_highest_sack_thresh_seen))
9143 rack_highest_sack_thresh_seen = (uint32_t)ackratio;
9144 movetotal = rack->r_ctl.sack_moved_extra;
9145 movetotal += rack->r_ctl.sack_noextra_move;
9146 moveratio = rack->r_ctl.sack_moved_extra;
9147 moveratio *= (uint64_t)1000;
9149 moveratio /= movetotal;
9151 /* No moves, that's pretty good */
9154 if ((rack->sack_attack_disable == 0) &&
9155 (moveratio > rack_highest_move_thresh_seen))
9156 rack_highest_move_thresh_seen = (uint32_t)moveratio;
9157 if (rack->sack_attack_disable == 0) {
9158 if ((ackratio > tcp_sack_to_ack_thresh) &&
9159 (moveratio > tcp_sack_to_move_thresh)) {
9160 /* Disable sack processing */
9161 rack->sack_attack_disable = 1;
9162 if (rack->r_rep_attack == 0) {
9163 rack->r_rep_attack = 1;
9164 counter_u64_add(rack_sack_attacks_detected, 1);
9166 tcp_trace_point(rack->rc_tp, TCP_TP_SAD_TRIGGERED);
9167 /* Clamp the cwnd at flight size */
9168 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd;
9169 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
9170 rack_log_sad(rack, 2);
9173 /* We are sack-disabled; check for false positives */
9174 if ((ackratio <= tcp_restoral_thresh) ||
9175 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) {
9176 rack->sack_attack_disable = 0;
9177 rack_log_sad(rack, 3);
9178 /* Restart counting */
9179 rack->r_ctl.sack_count = 0;
9180 rack->r_ctl.sack_moved_extra = 0;
9181 rack->r_ctl.sack_noextra_move = 1;
9182 rack->r_ctl.ack_count = max(1,
9183 (bytes_this_ack / segsiz));
9185 if (rack->r_rep_reverse == 0) {
9186 rack->r_rep_reverse = 1;
9187 counter_u64_add(rack_sack_attacks_reversed, 1);
9189 /* Restore the cwnd */
9190 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd)
9191 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd;
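/*
 * Rough sketch of the heuristic above: both ratios are kept in
 * parts-per-thousand, e.g. 400 SACKs against 100 cum-ack credits
 * gives an ackratio of 4000, and 300 "extra" map moves out of 500
 * total moves gives a moveratio of 600 (illustrative numbers).
 * Only when both ratios exceed their thresholds is SACK processing
 * disabled and the cwnd clamped to the current flight size; falling
 * back under tcp_restoral_thresh (or shrinking the map below the
 * minimum) restores normal processing and the saved cwnd.
 */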
9199 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end)
9205 if (SEQ_GT(end, start))
9209 if ((rack->rc_last_tlp_acked_set ) &&
9210 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) &&
9211 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) {
9213 * The DSACK is because of a TLP, so we don't
9214 * do anything with the reordering window, since
9215 * it was not reordering that caused the DSACK but
9216 * our previous TLP retransmit.
9218 rack_log_dsack_event(rack, 7, __LINE__, start, end);
9220 goto skip_dsack_round;
9222 if (rack->rc_last_sent_tlp_seq_valid) {
9223 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len;
9224 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) &&
9225 (SEQ_LEQ(end, l_end))) {
9227 * This dsack is from the last sent TLP, ignore it
9228 * for reordering purposes.
9230 rack_log_dsack_event(rack, 7, __LINE__, start, end);
9232 goto skip_dsack_round;
9235 if (rack->rc_dsack_round_seen == 0) {
9236 rack->rc_dsack_round_seen = 1;
9237 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max;
9238 rack->r_ctl.num_dsack++;
9239 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */
9240 rack_log_dsack_event(rack, 2, __LINE__, 0, 0);
9244 * We keep track of how many DSACK blocks we get
9245 * after a recovery incident.
9247 rack->r_ctl.dsack_byte_cnt += am;
9248 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) &&
9249 rack->r_ctl.retran_during_recovery &&
9250 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) {
9252 * False recovery; the most likely culprit is reordering. If
9253 * nothing else is missing we need to revert.
9255 rack->r_might_revert = 1;
9256 rack_handle_might_revert(rack->rc_tp, rack);
9257 rack->r_might_revert = 0;
9258 rack->r_ctl.retran_during_recovery = 0;
9259 rack->r_ctl.dsack_byte_cnt = 0;
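/*
 * In effect: once the DSACKed byte count reaches the number of bytes
 * we retransmitted during the last recovery episode, every one of
 * those retransmissions has been reported as a duplicate, so the
 * recovery was almost certainly spurious and rack_handle_might_revert()
 * is asked to undo it.
 */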
9265 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una)
9267 return (((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt);
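/*
 * The pipe estimate above follows the usual SACK-based form:
 *   pipe = (snd_max - snd_una) - sacked + holes_rxt
 * e.g. 40000 bytes outstanding with 15000 bytes SACKed and 5000
 * bytes retransmitted into holes gives a pipe of 30000 bytes
 * (illustrative numbers).
 */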
9271 rack_compute_pipe(struct tcpcb *tp)
9273 return ((int32_t)do_rack_compute_pipe(tp,
9274 (struct tcp_rack *)tp->t_fb_ptr,
9279 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack)
9281 /* Deal with changed and PRR here (in recovery only) */
9282 uint32_t pipe, snd_una;
9284 rack->r_ctl.rc_prr_delivered += changed;
9286 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) {
9288 * It is all outstanding, we are application limited
9289 * and thus we don't need more room to send anything.
9290 * Note we use tp->snd_una here and not th_ack because
9291 * the data has not yet been cut from the sb.
9293 rack->r_ctl.rc_prr_sndcnt = 0;
9296 /* Compute prr_sndcnt */
9297 if (SEQ_GT(tp->snd_una, th_ack)) {
9298 snd_una = tp->snd_una;
9302 pipe = do_rack_compute_pipe(tp, rack, snd_una);
9303 if (pipe > tp->snd_ssthresh) {
9306 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh;
9307 if (rack->r_ctl.rc_prr_recovery_fs > 0)
9308 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs;
9310 rack->r_ctl.rc_prr_sndcnt = 0;
9311 rack_log_to_prr(rack, 9, 0, __LINE__);
9315 if (sndcnt > (long)rack->r_ctl.rc_prr_out)
9316 sndcnt -= rack->r_ctl.rc_prr_out;
9319 rack->r_ctl.rc_prr_sndcnt = sndcnt;
9320 rack_log_to_prr(rack, 10, 0, __LINE__);
9324 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out)
9325 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out);
9328 if (changed > limit)
9330 limit += ctf_fixed_maxseg(tp);
9331 if (tp->snd_ssthresh > pipe) {
9332 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit);
9333 rack_log_to_prr(rack, 11, 0, __LINE__);
9335 rack->r_ctl.rc_prr_sndcnt = min(0, limit);
9336 rack_log_to_prr(rack, 12, 0, __LINE__);
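/*
 * Rough worked example of the PRR-style (cf. RFC 6937) computation
 * above: with prr_delivered = 3000, ssthresh = 10000,
 * rc_prr_recovery_fs = 20000 and prr_out = 1000 (illustrative
 * numbers), the proportional branch gives
 *   sndcnt = 3000 * 10000 / 20000 - 1000 = 500 bytes
 * of new send allowance.  Once pipe has fallen to or below ssthresh,
 * the limited branch instead caps the allowance at roughly the newly
 * delivered data plus one segment, and never more than
 * ssthresh - pipe.
 */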
9342 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck)
9345 struct tcp_rack *rack;
9346 struct rack_sendmap *rsm;
9347 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1];
9348 register uint32_t th_ack;
9349 int32_t i, j, k, num_sack_blks = 0;
9350 uint32_t cts, acked, ack_point;
9351 int loop_start = 0, moved_two = 0;
9355 INP_WLOCK_ASSERT(tptoinpcb(tp));
9356 if (tcp_get_flags(th) & TH_RST) {
9357 /* We don't log resets */
9360 rack = (struct tcp_rack *)tp->t_fb_ptr;
9361 cts = tcp_get_usecs(NULL);
9362 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
9364 th_ack = th->th_ack;
9365 if (rack->sack_attack_disable == 0)
9366 rack_do_decay(rack);
9367 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) {
9369 * You only get credit for
9370 * MSS and greater (and you get extra
9371 * credit for larger cum-ack moves).
9375 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp);
9376 rack->r_ctl.ack_count += ac;
9377 counter_u64_add(rack_ack_total, ac);
9379 if (rack->r_ctl.ack_count > 0xfff00000) {
9381 * reduce the number to keep us under
9384 rack->r_ctl.ack_count /= 2;
9385 rack->r_ctl.sack_count /= 2;
9387 if (SEQ_GT(th_ack, tp->snd_una)) {
9388 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__);
9389 tp->t_acktime = ticks;
9391 if (rsm && SEQ_GT(th_ack, rsm->r_start))
9392 changed = th_ack - rsm->r_start;
9394 rack_process_to_cumack(tp, rack, th_ack, cts, to);
9396 if ((to->to_flags & TOF_SACK) == 0) {
9397 /* We are done, nothing left and no sack. */
9398 rack_handle_might_revert(tp, rack);
9400 * For cases where we struck a dup-ack
9401 * with no SACK, add to the changes so
9402 * PRR will work right.
9404 if (dup_ack_struck && (changed == 0)) {
9405 changed += ctf_fixed_maxseg(rack->rc_tp);
9409 /* Sack block processing */
9410 if (SEQ_GT(th_ack, tp->snd_una))
9413 ack_point = tp->snd_una;
9414 for (i = 0; i < to->to_nsacks; i++) {
9415 bcopy((to->to_sacks + i * TCPOLEN_SACK),
9416 &sack, sizeof(sack));
9417 sack.start = ntohl(sack.start);
9418 sack.end = ntohl(sack.end);
9419 if (SEQ_GT(sack.end, sack.start) &&
9420 SEQ_GT(sack.start, ack_point) &&
9421 SEQ_LT(sack.start, tp->snd_max) &&
9422 SEQ_GT(sack.end, ack_point) &&
9423 SEQ_LEQ(sack.end, tp->snd_max)) {
9424 sack_blocks[num_sack_blks] = sack;
9426 } else if (SEQ_LEQ(sack.start, th_ack) &&
9427 SEQ_LEQ(sack.end, th_ack)) {
9430 was_tlp = rack_note_dsack(rack, sack.start, sack.end);
9432 * It's a D-SACK block.
9434 tcp_record_dsack(tp, sack.start, sack.end, was_tlp);
9437 if (rack->rc_dsack_round_seen) {
9438 /* Is the dsack round over? */
9439 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) {
9441 rack->rc_dsack_round_seen = 0;
9442 rack_log_dsack_event(rack, 3, __LINE__, 0, 0);
9446 * Sort the SACK blocks so we can update the rack scoreboard with
9449 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks,
9450 num_sack_blks, th->th_ack);
9451 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks);
9452 if (num_sack_blks == 0) {
9453 /* Nothing to sack (DSACKs?) */
9454 goto out_with_totals;
9456 if (num_sack_blks < 2) {
9457 /* Only one, we don't need to sort */
9460 /* Sort the sacks */
9461 for (i = 0; i < num_sack_blks; i++) {
9462 for (j = i + 1; j < num_sack_blks; j++) {
9463 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
9464 sack = sack_blocks[i];
9465 sack_blocks[i] = sack_blocks[j];
9466 sack_blocks[j] = sack;
9471 * Now are any of the sack block ends the same (yes some
9472 * implementations send these)?
9475 if (num_sack_blks == 0)
9476 goto out_with_totals;
9477 if (num_sack_blks > 1) {
9478 for (i = 0; i < num_sack_blks; i++) {
9479 for (j = i + 1; j < num_sack_blks; j++) {
9480 if (sack_blocks[i].end == sack_blocks[j].end) {
9482 * Ok these two have the same end; we
9483 * want the smallest end and then
9484 * throw away the larger and start
9487 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) {
9489 * The second block covers
9490 * more area, use that
9492 sack_blocks[i].start = sack_blocks[j].start;
9495 * Now collapse out the dup-sack and
9498 for (k = (j + 1); k < num_sack_blks; k++) {
9499 sack_blocks[j].start = sack_blocks[k].start;
9500 sack_blocks[j].end = sack_blocks[k].end;
9511 * First let's look to see if
9512 * we have retransmitted and
9513 * can use the transmit next?
9515 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
9517 SEQ_GT(sack_blocks[0].end, rsm->r_start) &&
9518 SEQ_LT(sack_blocks[0].start, rsm->r_end)) {
9520 * We probably did the FR and the next
9521 * SACK coming in continues as we would expect.
9523 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two);
9525 rack->r_wanted_output = 1;
9528 if (num_sack_blks == 1) {
9530 * This is what we would expect from
9531 * a normal implementation to happen
9532 * after we have retransmitted the FR,
9533 * i.e. the sack-filter pushes down
9534 * to 1 block and the next to be retransmitted
9535 * is the sequence in the sack block (as more
9536 * are acked). Count this as ACK'd data to boost
9537 * up the chances of recovering from any false positives.
9539 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp));
9540 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp)));
9541 counter_u64_add(rack_express_sack, 1);
9542 if (rack->r_ctl.ack_count > 0xfff00000) {
9544 * reduce the number to keep us under
9547 rack->r_ctl.ack_count /= 2;
9548 rack->r_ctl.sack_count /= 2;
9550 goto out_with_totals;
9553 * Start the loop through the
9554 * rest of the blocks, past the first block.
9560 /* It's a sack of some sort */
9561 rack->r_ctl.sack_count++;
9562 if (rack->r_ctl.sack_count > 0xfff00000) {
9564 * reduce the number to keep us under
9567 rack->r_ctl.ack_count /= 2;
9568 rack->r_ctl.sack_count /= 2;
9570 counter_u64_add(rack_sack_total, 1);
9571 if (rack->sack_attack_disable) {
9572 /* An attacker disablement is in place */
9573 if (num_sack_blks > 1) {
9574 rack->r_ctl.sack_count += (num_sack_blks - 1);
9575 rack->r_ctl.sack_moved_extra++;
9576 counter_u64_add(rack_move_some, 1);
9577 if (rack->r_ctl.sack_moved_extra > 0xfff00000) {
9578 rack->r_ctl.sack_moved_extra /= 2;
9579 rack->r_ctl.sack_noextra_move /= 2;
9584 rsm = rack->r_ctl.rc_sacklast;
9585 for (i = loop_start; i < num_sack_blks; i++) {
9586 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two);
9588 rack->r_wanted_output = 1;
9593 * If we did not get a SACK for at least an MSS and
9594 * had to move at all, or if we moved more than our
9595 * threshold, it counts against the "extra" move.
9597 rack->r_ctl.sack_moved_extra += moved_two;
9598 counter_u64_add(rack_move_some, 1);
9601 * else we did not have to move
9602 * any more than we would expect.
9604 rack->r_ctl.sack_noextra_move++;
9605 counter_u64_add(rack_move_none, 1);
9607 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) {
9609 * If the SACK was not a full MSS then
9610 * we add to sack_count the number of
9611 * MSS's (or possibly more than
9612 * an MSS if it's a TSO send) we had to skip by.
9614 rack->r_ctl.sack_count += moved_two;
9615 counter_u64_add(rack_sack_total, moved_two);
9618 * Now we need to set up for the next
9619 * round. First we make sure we won't
9620 * exceed the size of our uint32_t on
9621 * the various counts, and then clear out
9624 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) ||
9625 (rack->r_ctl.sack_noextra_move > 0xfff00000)) {
9626 rack->r_ctl.sack_moved_extra /= 2;
9627 rack->r_ctl.sack_noextra_move /= 2;
9629 if (rack->r_ctl.sack_count > 0xfff00000) {
9630 rack->r_ctl.ack_count /= 2;
9631 rack->r_ctl.sack_count /= 2;
9636 if (num_sack_blks > 1) {
9638 * You get an extra stroke if
9639 * you have more than one sack-blk, this
9640 * could be where we are skipping forward
9641 * and the sack-filter is still working, or
9642 * it could be an attacker constantly
9645 rack->r_ctl.sack_moved_extra++;
9646 counter_u64_add(rack_move_some, 1);
9649 #ifdef NETFLIX_EXP_DETECTION
9650 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp));
9653 /* Something changed, cancel the rack timer */
9654 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
9656 tsused = tcp_get_usecs(NULL);
9657 rsm = tcp_rack_output(tp, rack, tsused);
9658 if ((!IN_FASTRECOVERY(tp->t_flags)) &&
9660 ((rsm->r_flags & RACK_MUST_RXT) == 0)) {
9661 /* Enter recovery */
9662 entered_recovery = 1;
9663 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
9665 * When we enter recovery we need to assure we send
9668 if (rack->rack_no_prr == 0) {
9669 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
9670 rack_log_to_prr(rack, 8, 0, __LINE__);
9672 rack->r_timer_override = 1;
9674 rack->r_ctl.rc_agg_early = 0;
9675 } else if (IN_FASTRECOVERY(tp->t_flags) &&
9677 (rack->r_rr_config == 3)) {
9679 * Assure we can output and we get no
9680 * remembered pace time except the retransmit.
9682 rack->r_timer_override = 1;
9683 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
9684 rack->r_ctl.rc_resend = rsm;
9686 if (IN_FASTRECOVERY(tp->t_flags) &&
9687 (rack->rack_no_prr == 0) &&
9688 (entered_recovery == 0)) {
9689 rack_update_prr(tp, rack, changed, th_ack);
9690 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) &&
9691 ((tcp_in_hpts(rack->rc_inp) == 0) &&
9692 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) {
9694 * If you are pacing output you don't want
9698 rack->r_ctl.rc_agg_early = 0;
9699 rack->r_timer_override = 1;
9705 rack_strike_dupack(struct tcp_rack *rack)
9707 struct rack_sendmap *rsm;
9709 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
9710 while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
9711 rsm = TAILQ_NEXT(rsm, r_tnext);
9712 if (rsm->r_flags & RACK_MUST_RXT) {
9713 /* Sendmap entries that are marked to
9714 * be retransmitted do not need dupacks
9715 * struck. We get these marks for a number
9716 * of reasons (rxt timeout with no sack,
9717 * mtu change, or rwnd collapses). When
9718 * these events occur, we know we must retransmit
9719 * them and mark the sendmap entries. Dupack counting
9720 * is not needed since we are already set to retransmit
9721 * them as soon as we can.
9726 if (rsm && (rsm->r_dupack < 0xff)) {
9728 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) {
9732 * Here we see if we need to retransmit. For
9733 * a SACK type connection if enough time has passed
9734 * we will get a return of the rsm. For a non-sack
9735 * connection we will get the rsm returned if the
9736 * dupack value is 3 or more.
9738 cts = tcp_get_usecs(&tv);
9739 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts);
9740 if (rack->r_ctl.rc_resend != NULL) {
9741 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) {
9742 rack_cong_signal(rack->rc_tp, CC_NDUPACK,
9743 rack->rc_tp->snd_una, __LINE__);
9745 rack->r_wanted_output = 1;
9746 rack->r_timer_override = 1;
9747 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3);
9750 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3);
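/*
 * Note on the above: the per-rsm dup-ack counter saturates at 0xff,
 * and once tcp_rack_output() hands back an rsm to resend we both
 * signal CC_NDUPACK (if not already in recovery) and force an
 * immediate pass through the output path via r_wanted_output and
 * r_timer_override.
 */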
9756 rack_check_bottom_drag(struct tcpcb *tp,
9757 struct tcp_rack *rack,
9758 struct socket *so, int32_t acked)
9760 uint32_t segsiz, minseg;
9762 segsiz = ctf_fixed_maxseg(tp);
9765 if (tp->snd_max == tp->snd_una) {
9767 * We are doing dynamic pacing and we are way
9768 * under. Basically everything got acked while
9769 * we were still waiting on the pacer to expire.
9771 * This means we need to boost the b/w in
9772 * addition to any earlier boosting of
9775 rack->rc_dragged_bottom = 1;
9776 rack_validate_multipliers_at_or_above100(rack);
9778 * Let's use the segment bytes acked plus
9779 * the lowest RTT seen as the basis to
9780 * form a b/w estimate. This will be off
9781 * due to the fact that the true estimate
9782 * should be around 1/2 the time of the RTT
9783 * but we can settle for that.
9785 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) &&
9787 uint64_t bw, calc_bw, rtt;
9789 rtt = rack->r_ctl.rack_rs.rs_us_rtt;
9791 /* no us sample; is there a ms one? */
9792 if (rack->r_ctl.rack_rs.rs_rtt_lowest) {
9793 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
9795 goto no_measurement;
9799 calc_bw = bw * 1000000;
9801 if (rack->r_ctl.last_max_bw &&
9802 (rack->r_ctl.last_max_bw < calc_bw)) {
9804 * If we have a last calculated max bw
9807 calc_bw = rack->r_ctl.last_max_bw;
9809 /* now plop it in */
9810 if (rack->rc_gp_filled == 0) {
9811 if (calc_bw > ONE_POINT_TWO_MEG) {
9813 * If we have no measurement
9814 * don't let us set in more than
9815 * 1.2Mbps. If we are still too
9816 * low after pacing with this we
9817 * will hopefully have a max b/w
9818 * available to sanity check things.
9820 calc_bw = ONE_POINT_TWO_MEG;
9822 rack->r_ctl.rc_rtt_diff = 0;
9823 rack->r_ctl.gp_bw = calc_bw;
9824 rack->rc_gp_filled = 1;
9825 if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
9826 rack->r_ctl.num_measurements = RACK_REQ_AVG;
9827 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
9828 } else if (calc_bw > rack->r_ctl.gp_bw) {
9829 rack->r_ctl.rc_rtt_diff = 0;
9830 if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
9831 rack->r_ctl.num_measurements = RACK_REQ_AVG;
9832 rack->r_ctl.gp_bw = calc_bw;
9833 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
9835 rack_increase_bw_mul(rack, -1, 0, 0, 1);
9836 if ((rack->gp_ready == 0) &&
9837 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
9838 /* We have enough measurements now */
9840 rack_set_cc_pacing(rack);
9841 if (rack->defer_options)
9842 rack_apply_deferred_options(rack);
9845 * For acks over 1mss we do an extra boost to simulate
9846 * where we would get 2 acks (we want 110 for the mul).
9849 rack_increase_bw_mul(rack, -1, 0, 0, 1);
9852 * zero rtt possibly? Settle for just an old increase.
9855 rack_increase_bw_mul(rack, -1, 0, 0, 1);
9857 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
9858 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)),
9860 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) &&
9861 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) &&
9862 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <=
9863 (segsiz * rack_req_segs))) {
9865 * We are doing dynamic GP pacing and
9866 * we have everything except 1MSS or less
9867 * bytes left out. We are still pacing away.
9868 * And there is data that could be sent. This
9869 * means we are inserting delayed ack time in
9870 * our measurements because we are pacing too slow.
9872 rack_validate_multipliers_at_or_above100(rack);
9873 rack->rc_dragged_bottom = 1;
9874 rack_increase_bw_mul(rack, -1, 0, 0, 1);
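/*
 * Both branches above mark rc_dragged_bottom and bump the b/w
 * multiplier; the idea is that draining to (or nearly to) empty
 * while the pacer was still waiting means the current goodput
 * estimate is too low, so the acked bytes over the lowest observed
 * RTT (scaled to bytes per second) are folded into gp_bw when they
 * exceed the current estimate.
 */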
9881 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount)
9884 * The fast output path is enabled and we
9885 * have moved the cumack forward. Let's see if
9886 * we can expand forward the fast path length by
9887 * that amount. What we would ideally like to
9888 * do is increase the number of bytes in the
9889 * fast path block (left_to_send) by the
9890 * acked amount. However we have to gate that by:
9892 * 1) The amount outstanding and the rwnd of the peer
9893 * (i.e. we don't want to exceed the rwnd of the peer).
9895 * 2) The amount of data left in the socket buffer (i.e.
9896 * we can't send beyond what is in the buffer).
9898 * Note that this does not take into account any increase
9899 * in the cwnd. We will only extend the fast path by
9902 uint32_t new_total, gating_val;
9904 new_total = acked_amount + rack->r_ctl.fsb.left_to_send;
9905 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)),
9906 (tp->snd_wnd - (tp->snd_max - tp->snd_una)));
9907 if (new_total <= gating_val) {
9908 /* We can increase left_to_send by the acked amount */
9909 counter_u64_add(rack_extended_rfo, 1);
9910 rack->r_ctl.fsb.left_to_send = new_total;
9911 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))),
9912 ("rack:%p left_to_send:%u sbavail:%u out:%u",
9913 rack, rack->r_ctl.fsb.left_to_send,
9914 sbavail(&rack->rc_inp->inp_socket->so_snd),
9915 (tp->snd_max - tp->snd_una)));
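/*
 * Illustration of the gating above (hypothetical numbers): with
 * 8000 bytes outstanding, 20000 bytes sitting in the socket buffer
 * and a 15000 byte peer window,
 *   gating_val = min(20000 - 8000, 15000 - 8000) = 7000;
 * an ack that would push left_to_send past 7000 simply does not
 * extend the fast-output block.
 */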
9921 rack_adjust_sendmap(struct tcp_rack *rack, struct sockbuf *sb, tcp_seq snd_una)
9924 * Here any sendmap entry that points to the
9925 * beginning mbuf must be adjusted to the correct
9926 * offset. This must be called with:
9927 * 1) The socket buffer locked
9928 * 2) snd_una adjusted to its new position.
9930 * Note that (2) implies rack_ack_received has also been called.
9933 * We grab the first mbuf in the socket buffer and
9934 * then go through the front of the sendmap, recalculating
9935 * the stored offset for any sendmap entry that has
9936 * that mbuf. We must use the sb functions to do this
9937 * since it's possible an add was done as well as
9938 * the subtraction we may have just completed. This should
9939 * not be a penalty though, since we just referenced the sb
9940 * to go in and trim off the mbufs that we freed (of course
9941 * there will be a penalty for the sendmap references though).
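*
* Concretely (illustrative): if snd_una moved forward by 1000 bytes
* and the head rsm still points at the (possibly trimmed) first mbuf,
* its soff is simply recomputed from (r_start - snd_una) via
* sbsndmbuf() rather than trusting the old cached offset.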
9944 struct rack_sendmap *rsm;
9946 SOCKBUF_LOCK_ASSERT(sb);
9948 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
9949 if ((rsm == NULL) || (m == NULL)) {
9950 /* Nothing outstanding */
9953 while (rsm->m && (rsm->m == m)) {
9959 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff);
9960 if (rsm->orig_m_len != m->m_len) {
9961 rack_adjust_orig_mlen(rsm);
9963 if (rsm->soff != soff) {
9965 * This is not a fatal error, we anticipate it
9966 * might happen (the else code), so we count it here
9967 * so that under invariant we can see that it really
9970 counter_u64_add(rack_adjust_map_bw, 1);
9975 rsm->orig_m_len = rsm->m->m_len;
9977 rsm->orig_m_len = 0;
9979 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff);
9981 rsm->orig_m_len = rsm->m->m_len;
9983 rsm->orig_m_len = 0;
9985 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
9993 * Return value of 1, we do not need to call rack_process_data().
9994 * Return value of 0, rack_process_data can be called.
9995 * For ret_val if it's 0 the TCP is locked, if it's non-zero
9996 * it's unlocked and probably unsafe to touch the TCB.
9999 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
10000 struct tcpcb *tp, struct tcpopt *to,
10001 uint32_t tiwin, int32_t tlen,
10002 int32_t * ofia, int32_t thflags, int32_t *ret_val)
10004 int32_t ourfinisacked = 0;
10005 int32_t nsegs, acked_amount;
10007 struct mbuf *mfree;
10008 struct tcp_rack *rack;
10009 int32_t under_pacing = 0;
10010 int32_t recovery = 0;
10012 INP_WLOCK_ASSERT(tptoinpcb(tp));
10014 rack = (struct tcp_rack *)tp->t_fb_ptr;
10015 if (SEQ_GT(th->th_ack, tp->snd_max)) {
10016 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val,
10017 &rack->r_ctl.challenge_ack_ts,
10018 &rack->r_ctl.challenge_ack_cnt);
10019 rack->r_wanted_output = 1;
10022 if (rack->gp_ready &&
10023 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
10026 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
10027 int in_rec, dup_ack_struck = 0;
10029 in_rec = IN_FASTRECOVERY(tp->t_flags);
10030 if (rack->rc_in_persist) {
10031 tp->t_rxtshift = 0;
10032 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
10033 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
10035 if ((th->th_ack == tp->snd_una) &&
10036 (tiwin == tp->snd_wnd) &&
10037 ((to->to_flags & TOF_SACK) == 0)) {
10038 rack_strike_dupack(rack);
10039 dup_ack_struck = 1;
10041 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), dup_ack_struck);
10043 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
10045 * Old ack, behind (or duplicate to) the last one rcv'd
10046 * Note: We mark that reordering is occurring if it's
10047 * less than and we have not closed our window.
10049 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) {
10050 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
10055 * If we reach this point, ACK is not a duplicate, i.e., it ACKs
10056 * something we sent.
10058 if (tp->t_flags & TF_NEEDSYN) {
10060 * T/TCP: Connection was half-synchronized, and our SYN has
10061 * been ACK'd (so connection is now fully synchronized). Go
10062 * to non-starred state, increment snd_una for ACK of SYN,
10063 * and check if we can do window scaling.
10065 tp->t_flags &= ~TF_NEEDSYN;
10067 /* Do window scaling? */
10068 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
10069 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
10070 tp->rcv_scale = tp->request_r_scale;
10071 /* Send window already scaled. */
10074 nsegs = max(1, m->m_pkthdr.lro_nsegs);
10076 acked = BYTES_THIS_ACK(tp, th);
10079 * Any time we move the cum-ack forward, clear the
10080 * keep-alive tied probe-not-answered flag. The
10081 * persist timer clears its own on entry.
10083 rack->probe_not_answered = 0;
10085 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
10086 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
10088 * If we just performed our first retransmit, and the ACK arrives
10089 * within our recovery window, then it was a mistake to do the
10090 * retransmit in the first place. Recover our original cwnd and
10091 * ssthresh, and proceed to transmit where we left off.
10093 if ((tp->t_flags & TF_PREVVALID) &&
10094 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
10095 tp->t_flags &= ~TF_PREVVALID;
10096 if (tp->t_rxtshift == 1 &&
10097 (int)(ticks - tp->t_badrxtwin) < 0)
10098 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__);
10101 /* assure we are not backed off */
10102 tp->t_rxtshift = 0;
10103 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
10104 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
10105 rack->rc_tlp_in_progress = 0;
10106 rack->r_ctl.rc_tlp_cnt_out = 0;
10108 * If it is the RXT timer we want to
10109 * stop it, so we can restart a TLP.
10111 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
10112 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
10113 #ifdef NETFLIX_HTTP_LOGGING
10114 tcp_http_check_for_comp(rack->rc_tp, th->th_ack);
10118 * If we have a timestamp reply, update smoothed round trip time. If
10119 * no timestamp is present but transmit timer is running and timed
10120 * sequence number was acked, update smoothed round trip time. Since
10121 * we now have an rtt measurement, cancel the timer backoff (cf.,
10122 * Phil Karn's retransmit alg.). Recompute the initial retransmit
10125 * Some boxes send broken timestamp replies during the SYN+ACK
10126 * phase, ignore timestamps of 0 or we could calculate a huge RTT
10127 * and blow up the retransmit timer.
10130 * If all outstanding data is acked, stop retransmit timer and
10131 * remember to restart (more output or persist). If there is more
10132 * data to be acked, restart retransmit timer, using current
10133 * (possibly backed-off) value.
10137 *ofia = ourfinisacked;
10140 if (IN_RECOVERY(tp->t_flags)) {
10141 if (SEQ_LT(th->th_ack, tp->snd_recover) &&
10142 (SEQ_LT(th->th_ack, tp->snd_max))) {
10143 tcp_rack_partialack(tp);
10145 rack_post_recovery(tp, th->th_ack);
10150 * Let the congestion control algorithm update congestion control
10151 * related information. This typically means increasing the
10152 * congestion window.
10154 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery);
10155 SOCKBUF_LOCK(&so->so_snd);
10156 acked_amount = min(acked, (int)sbavail(&so->so_snd));
10157 tp->snd_wnd -= acked_amount;
10158 mfree = sbcut_locked(&so->so_snd, acked_amount);
10159 if ((sbused(&so->so_snd) == 0) &&
10160 (acked > acked_amount) &&
10161 (tp->t_state >= TCPS_FIN_WAIT_1) &&
10162 (tp->t_flags & TF_SENTFIN)) {
10164 * We must be sure our fin
10165 * was sent and acked (we can be
10166 * in FIN_WAIT_1 without having
10171 tp->snd_una = th->th_ack;
10172 if (acked_amount && sbavail(&so->so_snd))
10173 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una);
10174 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
10175 /* NB: sowwakeup_locked() does an implicit unlock. */
10176 sowwakeup_locked(so);
10178 if (SEQ_GT(tp->snd_una, tp->snd_recover))
10179 tp->snd_recover = tp->snd_una;
10181 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
10182 tp->snd_nxt = tp->snd_una;
10184 if (under_pacing &&
10185 (rack->use_fixed_rate == 0) &&
10186 (rack->in_probe_rtt == 0) &&
10187 rack->rc_gp_dyn_mul &&
10188 rack->rc_always_pace) {
10189 /* Check if we are dragging bottom */
10190 rack_check_bottom_drag(tp, rack, so, acked);
10192 if (tp->snd_una == tp->snd_max) {
10193 /* Nothing left outstanding */
10194 tp->t_flags &= ~TF_PREVVALID;
10195 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
10196 rack->r_ctl.retran_during_recovery = 0;
10197 rack->r_ctl.dsack_byte_cnt = 0;
10198 if (rack->r_ctl.rc_went_idle_time == 0)
10199 rack->r_ctl.rc_went_idle_time = 1;
10200 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
10201 if (sbavail(&tptosocket(tp)->so_snd) == 0)
10203 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
10204 /* Set need output so persist might get set */
10205 rack->r_wanted_output = 1;
10206 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
10207 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
10208 (sbavail(&so->so_snd) == 0) &&
10209 (tp->t_flags2 & TF2_DROP_AF_DATA)) {
10211 * The socket was gone and the
10212 * peer sent data (now or in the past), time to
10216 /* tcp_close will kill the inp pre-log the Reset */
10217 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
10218 tp = tcp_close(tp);
10219 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
10224 *ofia = ourfinisacked;
10230 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line,
10231 int dir, uint32_t flags, struct rack_sendmap *rsm)
10233 if (tcp_bblogging_on(rack->rc_tp)) {
10234 union tcp_log_stackspecific log;
10237 memset(&log, 0, sizeof(log));
10238 log.u_bbr.flex1 = cnt;
10239 log.u_bbr.flex2 = split;
10240 log.u_bbr.flex3 = out;
10241 log.u_bbr.flex4 = line;
10242 log.u_bbr.flex5 = rack->r_must_retran;
10243 log.u_bbr.flex6 = flags;
10244 log.u_bbr.flex7 = rack->rc_has_collapsed;
10245 log.u_bbr.flex8 = dir; /*
10246 * 1 is collapsed, 0 is uncollapsed,
10247 * 2 is log of a rsm being marked, 3 is a split.
10250 log.u_bbr.rttProp = 0;
10252 log.u_bbr.rttProp = (uint64_t)rsm;
10253 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
10254 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
10255 TCP_LOG_EVENTP(rack->rc_tp, NULL,
10256 &rack->rc_inp->inp_socket->so_rcv,
10257 &rack->rc_inp->inp_socket->so_snd,
10258 TCP_RACK_LOG_COLLAPSE, 0,
10259 0, &log, false, &tv);
10264 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, int line)
10267 * Here all we do is mark the collapsed point and set the flag.
10268 * This may happen again and again, but there is no
10269 * sense splitting our map until we know where the
10270 * peer finally lands in the collapse.
10272 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND);
10273 if ((rack->rc_has_collapsed == 0) ||
10274 (rack->r_ctl.last_collapse_point != (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)))
10275 counter_u64_add(rack_collapsed_win_seen, 1);
10276 rack->r_ctl.last_collapse_point = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd;
10277 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max;
10278 rack->rc_has_collapsed = 1;
10279 rack->r_collapse_point_valid = 1;
10280 rack_log_collapse(rack, 0, 0, rack->r_ctl.last_collapse_point, line, 1, 0, NULL);
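/*
 * In other words, collapsing only records where the advertised edge
 * (snd_una + snd_wnd) now sits and how far we had already sent
 * (snd_max); the sendmap itself is left alone until the window
 * re-opens and rack_un_collapse_window() splits and marks the
 * affected entries.
 */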
10284 rack_un_collapse_window(struct tcp_rack *rack, int line)
10286 struct rack_sendmap *nrsm, *rsm, fe;
10287 int cnt = 0, split = 0;
10289 struct rack_sendmap *insret;
10292 memset(&fe, 0, sizeof(fe));
10293 rack->rc_has_collapsed = 0;
10294 fe.r_start = rack->r_ctl.last_collapse_point;
10295 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
10297 /* Nothing to do, maybe the peer ack'ed it all */
10298 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL);
10301 /* Now do we need to split this one? */
10302 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) {
10303 rack_log_collapse(rack, rsm->r_start, rsm->r_end,
10304 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm);
10305 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
10306 if (nrsm == NULL) {
10307 /* We can't get a rsm, mark all? */
10313 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point);
10315 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
10317 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
10318 if (insret != NULL) {
10319 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
10320 nrsm, insret, rack, rsm);
10323 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT,
10324 rack->r_ctl.last_collapse_point, __LINE__);
10325 if (rsm->r_in_tmap) {
10326 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
10327 nrsm->r_in_tmap = 1;
10330 * Set in the new RSM as the
10331 * collapsed starting point
10336 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) {
10337 nrsm->r_flags |= RACK_RWND_COLLAPSED;
10338 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm);
10342 counter_u64_add(rack_collapsed_win, 1);
10344 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL);
10348 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack,
10349 int32_t tlen, int32_t tfo_syn)
10351 if (DELAY_ACK(tp, tlen) || tfo_syn) {
10352 if (rack->rc_dack_mode &&
10354 (rack->rc_dack_toggle == 1)) {
10355 goto no_delayed_ack;
10357 rack_timer_cancel(tp, rack,
10358 rack->r_ctl.rc_rcvtime, __LINE__);
10359 tp->t_flags |= TF_DELACK;
10362 rack->r_wanted_output = 1;
10363 tp->t_flags |= TF_ACKNOW;
10364 if (rack->rc_dack_mode) {
10365 if (tp->t_flags & TF_DELACK)
10366 rack->rc_dack_toggle = 1;
10368 rack->rc_dack_toggle = 0;
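/*
 * Hedged note: rc_dack_mode/rc_dack_toggle appear to alternate
 * between a delayed ack and an immediate ack, so that when the mode
 * is enabled at most every other eligible segment has its ack
 * delayed.
 */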
10374 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack)
10377 * If fast output is in progress, let's validate that
10378 * the new window did not shrink on us and make it
10379 * so fast output should end.
10381 if (rack->r_fast_output) {
10385 * Calculate what we will send if left as is
10386 * and compare that to our send window.
10388 out = ctf_outstanding(tp);
10389 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) {
10390 /* ok we have an issue */
10391 if (out >= tp->snd_wnd) {
10392 /* Turn off fast output, the window is met or collapsed */
10393 rack->r_fast_output = 0;
10395 /* we have some room left */
10396 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out;
10397 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) {
10398 /* If not at least 1 full segment never mind */
10399 rack->r_fast_output = 0;
10408 * Return value of 1, the TCB is unlocked and most
10409 * likely gone, return value of 0, the TCP is still
10413 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
10414 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
10415 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
10418 * Update window information. Don't look at window if no ACK: TAC's
10419 * send garbage on first SYN.
10423 struct tcp_rack *rack;
10425 INP_WLOCK_ASSERT(tptoinpcb(tp));
10427 rack = (struct tcp_rack *)tp->t_fb_ptr;
10428 nsegs = max(1, m->m_pkthdr.lro_nsegs);
10429 if ((thflags & TH_ACK) &&
10430 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
10431 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
10432 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
10433 /* keep track of pure window updates */
10435 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
10436 KMOD_TCPSTAT_INC(tcps_rcvwinupd);
10437 tp->snd_wnd = tiwin;
10438 rack_validate_fo_sendwin_up(tp, rack);
10439 tp->snd_wl1 = th->th_seq;
10440 tp->snd_wl2 = th->th_ack;
10441 if (tp->snd_wnd > tp->max_sndwnd)
10442 tp->max_sndwnd = tp->snd_wnd;
10443 rack->r_wanted_output = 1;
10444 } else if (thflags & TH_ACK) {
10445 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) {
10446 tp->snd_wnd = tiwin;
10447 rack_validate_fo_sendwin_up(tp, rack);
10448 tp->snd_wl1 = th->th_seq;
10449 tp->snd_wl2 = th->th_ack;
10452 if (tp->snd_wnd < ctf_outstanding(tp))
10453 /* The peer collapsed the window */
10454 rack_collapsed_window(rack, ctf_outstanding(tp), __LINE__);
10455 else if (rack->rc_has_collapsed)
10456 rack_un_collapse_window(rack, __LINE__);
10457 if ((rack->r_collapse_point_valid) &&
10458 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point)))
10459 rack->r_collapse_point_valid = 0;
10460 /* Was persist timer active and now we have window space? */
10461 if ((rack->rc_in_persist != 0) &&
10462 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
10463 rack->r_ctl.rc_pace_min_segs))) {
10464 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime);
10465 tp->snd_nxt = tp->snd_max;
10466 /* Make sure we output to start the timer */
10467 rack->r_wanted_output = 1;
10469 /* Do we enter persists? */
10470 if ((rack->rc_in_persist == 0) &&
10471 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
10472 TCPS_HAVEESTABLISHED(tp->t_state) &&
10473 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
10474 sbavail(&tptosocket(tp)->so_snd) &&
10475 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
10477 * Here the rwnd is less than
10478 * the pacing size, we are established,
10479 * nothing is outstanding, and there is
10480 * data to send. Enter persists.
10482 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
10484 if (tp->t_flags2 & TF2_DROP_AF_DATA) {
10489 * don't process the URG bit, ignore them drag
10492 tp->rcv_up = tp->rcv_nxt;
10495 * Process the segment text, merging it into the TCP sequencing
10496 * queue, and arranging for acknowledgment of receipt if necessary.
10497 * This process logically involves adjusting tp->rcv_wnd as data is
10498 * presented to the user (this happens in tcp_usrreq.c, case
10499 * PRU_RCVD). If a FIN has already been received on this connection
10500 * then we just ignore the text.
10502 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
10503 IS_FASTOPEN(tp->t_flags));
10504 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) &&
10505 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
10506 tcp_seq save_start = th->th_seq;
10507 tcp_seq save_rnxt = tp->rcv_nxt;
10508 int save_tlen = tlen;
10510 m_adj(m, drop_hdrlen); /* delayed header drop */
10512 * Insert segment which includes th into TCP reassembly
10513 * queue with control block tp. Set thflags to whether
10514 * reassembly now includes a segment with FIN. This handles
10515 * the common case inline (segment is the next to be
10516 * received on an established connection, and the queue is
10517 * empty), avoiding linkage into and removal from the queue
10518 * and repetition of various conversions. Set DELACK for
10519 * segments received in order, but ack immediately when
10520 * segments are out of order (so fast retransmit can work).
10522 if (th->th_seq == tp->rcv_nxt &&
10524 (TCPS_HAVEESTABLISHED(tp->t_state) ||
10526 #ifdef NETFLIX_SB_LIMITS
10527 u_int mcnt, appended;
10529 if (so->so_rcv.sb_shlim) {
10530 mcnt = m_memcnt(m);
10532 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
10533 CFO_NOSLEEP, NULL) == false) {
10534 counter_u64_add(tcp_sb_shlim_fails, 1);
10540 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn);
10541 tp->rcv_nxt += tlen;
10543 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
10544 (tp->t_fbyte_in == 0)) {
10545 tp->t_fbyte_in = ticks;
10546 if (tp->t_fbyte_in == 0)
10547 tp->t_fbyte_in = 1;
10548 if (tp->t_fbyte_out && tp->t_fbyte_in)
10549 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
10551 thflags = tcp_get_flags(th) & TH_FIN;
10552 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
10553 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
10554 SOCKBUF_LOCK(&so->so_rcv);
10555 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
10558 #ifdef NETFLIX_SB_LIMITS
10561 sbappendstream_locked(&so->so_rcv, m, 0);
10563 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1);
10564 /* NB: sorwakeup_locked() does an implicit unlock. */
10565 sorwakeup_locked(so);
10566 #ifdef NETFLIX_SB_LIMITS
10567 if (so->so_rcv.sb_shlim && appended != mcnt)
10568 counter_fo_release(so->so_rcv.sb_shlim,
10573 * XXX: Due to the header drop above "th" is
10574 * theoretically invalid by now. Fortunately
10575 * m_adj() doesn't actually free any mbufs when
10576 * trimming from the head.
10578 tcp_seq temp = save_start;
10580 thflags = tcp_reass(tp, th, &temp, &tlen, m);
10581 tp->t_flags |= TF_ACKNOW;
10582 if (tp->t_flags & TF_WAKESOR) {
10583 tp->t_flags &= ~TF_WAKESOR;
10584 /* NB: sorwakeup_locked() does an implicit unlock. */
10585 sorwakeup_locked(so);
10588 if ((tp->t_flags & TF_SACK_PERMIT) &&
10590 TCPS_HAVEESTABLISHED(tp->t_state)) {
10591 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
10593 * DSACK actually handled in the fastpath
10596 RACK_OPTS_INC(tcp_sack_path_1);
10597 tcp_update_sack_list(tp, save_start,
10598 save_start + save_tlen);
10599 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
10600 if ((tp->rcv_numsacks >= 1) &&
10601 (tp->sackblks[0].end == save_start)) {
10603 * Partial overlap, recorded at todrop
10606 RACK_OPTS_INC(tcp_sack_path_2a);
10607 tcp_update_sack_list(tp,
10608 tp->sackblks[0].start,
10609 tp->sackblks[0].end);
10611 RACK_OPTS_INC(tcp_sack_path_2b);
10612 tcp_update_dsack_list(tp, save_start,
10613 save_start + save_tlen);
10615 } else if (tlen >= save_tlen) {
10616 /* Update of sackblks. */
10617 RACK_OPTS_INC(tcp_sack_path_3);
10618 tcp_update_dsack_list(tp, save_start,
10619 save_start + save_tlen);
10620 } else if (tlen > 0) {
10621 RACK_OPTS_INC(tcp_sack_path_4);
10622 tcp_update_dsack_list(tp, save_start,
10623 save_start + tlen);
10628 thflags &= ~TH_FIN;
10632 * If FIN is received, ACK the FIN and let the user know that the
10633 * connection is closing.
10635 if (thflags & TH_FIN) {
10636 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
10637 /* The socket upcall is handled by socantrcvmore. */
10640 * If connection is half-synchronized (i.e. NEEDSYN
10641 * flag on) then delay ACK, so it may be piggybacked
10642 * when SYN is sent. Otherwise, since we received a
10643 * FIN then no more input can be expected, send ACK
10646 if (tp->t_flags & TF_NEEDSYN) {
10647 rack_timer_cancel(tp, rack,
10648 rack->r_ctl.rc_rcvtime, __LINE__);
10649 tp->t_flags |= TF_DELACK;
10651 tp->t_flags |= TF_ACKNOW;
10655 switch (tp->t_state) {
10657 * In SYN_RECEIVED and ESTABLISHED STATES enter the
10658 * CLOSE_WAIT state.
10660 case TCPS_SYN_RECEIVED:
10661 tp->t_starttime = ticks;
10663 case TCPS_ESTABLISHED:
10664 rack_timer_cancel(tp, rack,
10665 rack->r_ctl.rc_rcvtime, __LINE__);
10666 tcp_state_change(tp, TCPS_CLOSE_WAIT);
10670 * If still in FIN_WAIT_1 STATE FIN has not been
10671 * acked so enter the CLOSING state.
10673 case TCPS_FIN_WAIT_1:
10674 rack_timer_cancel(tp, rack,
10675 rack->r_ctl.rc_rcvtime, __LINE__);
10676 tcp_state_change(tp, TCPS_CLOSING);
10680 * In FIN_WAIT_2 state enter the TIME_WAIT state,
10681 * starting the time-wait timer, turning off the
10682 * other standard timers.
10684 case TCPS_FIN_WAIT_2:
10685 rack_timer_cancel(tp, rack,
10686 rack->r_ctl.rc_rcvtime, __LINE__);
10692 * Return any desired output.
10694 if ((tp->t_flags & TF_ACKNOW) ||
10695 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) {
10696 rack->r_wanted_output = 1;
10702 * Here nothing is really faster, it's just that we
10703 * have broken out the fast-data path also just like
10707 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so,
10708 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10709 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos)
10712 int32_t newsize = 0; /* automatic sockbuf scaling */
10713 struct tcp_rack *rack;
10714 #ifdef NETFLIX_SB_LIMITS
10715 u_int mcnt, appended;
10719 * If last ACK falls within this segment's sequence numbers, record
10720 * the timestamp. NOTE that the test is modified according to the
10721 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
10723 if (__predict_false(th->th_seq != tp->rcv_nxt)) {
10726 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
10729 if (tiwin && tiwin != tp->snd_wnd) {
10732 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) {
10735 if (__predict_false((to->to_flags & TOF_TS) &&
10736 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) {
10739 if (__predict_false((th->th_ack != tp->snd_una))) {
10742 if (__predict_false(tlen > sbspace(&so->so_rcv))) {
10745 if ((to->to_flags & TOF_TS) != 0 &&
10746 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
10747 tp->ts_recent_age = tcp_ts_getticks();
10748 tp->ts_recent = to->to_tsval;
10750 rack = (struct tcp_rack *)tp->t_fb_ptr;
10752 * This is a pure, in-sequence data packet with nothing on the
10753 * reassembly queue and we have enough buffer space to take it.
10755 nsegs = max(1, m->m_pkthdr.lro_nsegs);
10757 #ifdef NETFLIX_SB_LIMITS
10758 if (so->so_rcv.sb_shlim) {
10759 mcnt = m_memcnt(m);
10761 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
10762 CFO_NOSLEEP, NULL) == false) {
10763 counter_u64_add(tcp_sb_shlim_fails, 1);
10769 /* Clean receiver SACK report if present */
10770 if (tp->rcv_numsacks)
10771 tcp_clean_sackreport(tp);
10772 KMOD_TCPSTAT_INC(tcps_preddat);
10773 tp->rcv_nxt += tlen;
10775 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
10776 (tp->t_fbyte_in == 0)) {
10777 tp->t_fbyte_in = ticks;
10778 if (tp->t_fbyte_in == 0)
10779 tp->t_fbyte_in = 1;
10780 if (tp->t_fbyte_out && tp->t_fbyte_in)
10781 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
10784 * Pull snd_wl1 up to prevent seq wrap relative to th_seq.
10786 tp->snd_wl1 = th->th_seq;
10788 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt.
10790 tp->rcv_up = tp->rcv_nxt;
10791 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
10792 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
10793 newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
10795 /* Add data to socket buffer. */
10796 SOCKBUF_LOCK(&so->so_rcv);
10797 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
10801 * Set new socket buffer size. Give up when limit is
10805 if (!sbreserve_locked(so, SO_RCV, newsize, NULL))
10806 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
10807 m_adj(m, drop_hdrlen); /* delayed header drop */
10808 #ifdef NETFLIX_SB_LIMITS
10811 sbappendstream_locked(&so->so_rcv, m, 0);
10812 ctf_calc_rwin(so, tp);
10814 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1);
10815 /* NB: sorwakeup_locked() does an implicit unlock. */
10816 sorwakeup_locked(so);
10817 #ifdef NETFLIX_SB_LIMITS
10818 if (so->so_rcv.sb_shlim && mcnt != appended)
10819 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended);
10821 rack_handle_delayed_ack(tp, rack, tlen, 0);
10822 if (tp->snd_una == tp->snd_max)
10823 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
10828 * This subfunction is used to try to highly optimize the
10829 * fast path. We again allow window updates that are
10830 * in sequence to remain in the fast-path. We also add
10831 * in the __predict's to attempt to help the compiler.
10832 * Note that if we return a 0, then we can *not* process
10833 * it and the caller should push the packet into the
10837 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
10838 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10839 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts)
10843 int32_t under_pacing = 0;
10844 struct tcp_rack *rack;
10846 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
10847 /* Old ack, behind (or duplicate to) the last one rcv'd */
10850 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) {
10851 /* Above what we have sent? */
10854 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
10855 /* We are retransmitting */
10858 if (__predict_false(tiwin == 0)) {
10862 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) {
10863 /* We need a SYN or a FIN, unlikely.. */
10866 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) {
10867 /* Timestamp is behind .. old ack with seq wrap? */
10870 if (__predict_false(IN_RECOVERY(tp->t_flags))) {
10871 /* Still recovering */
10874 rack = (struct tcp_rack *)tp->t_fb_ptr;
10875 if (rack->r_ctl.rc_sacked) {
10876 /* We have sack holes on our scoreboard */
10879 /* Ok if we reach here, we can process a fast-ack */
10880 if (rack->gp_ready &&
10881 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
10884 nsegs = max(1, m->m_pkthdr.lro_nsegs);
10885 rack_log_ack(tp, to, th, 0, 0);
10886 /* Did the window get updated? */
10887 if (tiwin != tp->snd_wnd) {
10888 tp->snd_wnd = tiwin;
10889 rack_validate_fo_sendwin_up(tp, rack);
10890 tp->snd_wl1 = th->th_seq;
10891 if (tp->snd_wnd > tp->max_sndwnd)
10892 tp->max_sndwnd = tp->snd_wnd;
10894 /* Do we exit persists? */
10895 if ((rack->rc_in_persist != 0) &&
10896 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
10897 rack->r_ctl.rc_pace_min_segs))) {
10898 rack_exit_persist(tp, rack, cts);
10900 /* Do we enter persists? */
10901 if ((rack->rc_in_persist == 0) &&
10902 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
10903 TCPS_HAVEESTABLISHED(tp->t_state) &&
10904 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
10905 sbavail(&tptosocket(tp)->so_snd) &&
10906 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
10908 * Here the rwnd is less than
10909 * the pacing size, we are established,
10910 * nothing is outstanding, and there is
10911 * data to send. Enter persists.
10913 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
10916 * If last ACK falls within this segment's sequence numbers, record
10917 * the timestamp. NOTE that the test is modified according to the
10918 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
10920 if ((to->to_flags & TOF_TS) != 0 &&
10921 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
10922 tp->ts_recent_age = tcp_ts_getticks();
10923 tp->ts_recent = to->to_tsval;
10926 * This is a pure ack for outstanding data.
10928 KMOD_TCPSTAT_INC(tcps_predack);
10931 * "bad retransmit" recovery.
10933 if ((tp->t_flags & TF_PREVVALID) &&
10934 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
10935 tp->t_flags &= ~TF_PREVVALID;
10936 if (tp->t_rxtshift == 1 &&
10937 (int)(ticks - tp->t_badrxtwin) < 0)
10938 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__);
10941 * Recalculate the transmit timer / rtt.
10943 * Some boxes send broken timestamp replies during the SYN+ACK
10944 * phase, ignore timestamps of 0 or we could calculate a huge RTT
10945 * and blow up the retransmit timer.
10947 acked = BYTES_THIS_ACK(tp, th);
10950 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
10951 hhook_run_tcp_est_in(tp, th, to);
10953 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
10954 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
10956 struct mbuf *mfree;
10958 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0);
10959 SOCKBUF_LOCK(&so->so_snd);
10960 mfree = sbcut_locked(&so->so_snd, acked);
10961 tp->snd_una = th->th_ack;
10962 /* Note we want to hold the sb lock through the sendmap adjust */
10963 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una);
10964 /* Wake up the socket if we have room to write more */
10965 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
10966 sowwakeup_locked(so);
10968 tp->t_rxtshift = 0;
10969 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
10970 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
10971 rack->rc_tlp_in_progress = 0;
10972 rack->r_ctl.rc_tlp_cnt_out = 0;
10974 * If it is the RXT timer we want to
10975 * stop it, so we can restart a TLP.
10977 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
10978 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
10979 #ifdef NETFLIX_HTTP_LOGGING
10980 tcp_http_check_for_comp(rack->rc_tp, th->th_ack);
10984 * Let the congestion control algorithm update congestion control
10985 * related information. This typically means increasing the
10986 * congestion window.
10988 if (tp->snd_wnd < ctf_outstanding(tp)) {
10989 /* The peer collapsed the window */
10990 rack_collapsed_window(rack, ctf_outstanding(tp), __LINE__);
10991 } else if (rack->rc_has_collapsed)
10992 rack_un_collapse_window(rack, __LINE__);
10993 if ((rack->r_collapse_point_valid) &&
10994 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point)))
10995 rack->r_collapse_point_valid = 0;
10997 * Pull snd_wl2 up to prevent seq wrap relative to th_ack.
10999 tp->snd_wl2 = th->th_ack;
11002 /* ND6_HINT(tp); *//* Some progress has been made. */
11005 * If all outstanding data are acked, stop retransmit timer,
11006 * otherwise restart timer using current (possibly backed-off)
11007 * value. If process is waiting for space, wakeup/selwakeup/signal.
11008 * If data are ready to send, let tcp_output decide between more
11009 * output or persist.
11011 if (under_pacing &&
11012 (rack->use_fixed_rate == 0) &&
11013 (rack->in_probe_rtt == 0) &&
11014 rack->rc_gp_dyn_mul &&
11015 rack->rc_always_pace) {
11016 /* Check if we are dragging bottom */
11017 rack_check_bottom_drag(tp, rack, so, acked);
11019 if (tp->snd_una == tp->snd_max) {
11020 tp->t_flags &= ~TF_PREVVALID;
11021 rack->r_ctl.retran_during_recovery = 0;
11022 rack->r_ctl.dsack_byte_cnt = 0;
11023 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
11024 if (rack->r_ctl.rc_went_idle_time == 0)
11025 rack->r_ctl.rc_went_idle_time = 1;
11026 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
11027 if (sbavail(&tptosocket(tp)->so_snd) == 0)
11029 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
11031 if (acked && rack->r_fast_output)
11032 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked);
11033 if (sbavail(&so->so_snd)) {
11034 rack->r_wanted_output = 1;
11040 * Return value of 1, the TCB is unlocked and most
11041 * likely gone, return value of 0, the TCP is still locked.
11045 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
11046 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11047 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11049 int32_t ret_val = 0;
11051 int32_t ourfinisacked = 0;
11052 struct tcp_rack *rack;
11054 INP_WLOCK_ASSERT(tptoinpcb(tp));
11056 ctf_calc_rwin(so, tp);
11058 * If the state is SYN_SENT: if seg contains an ACK, but not for our
11059 * SYN, drop the input. if seg contains a RST, then drop the
11060 * connection. if seg does not contain SYN, then drop it. Otherwise
11061 * this is an acceptable SYN segment initialize tp->rcv_nxt and
11062 * tp->irs if seg contains ack then advance tp->snd_una if seg
11063 * contains an ECE and ECN support is enabled, the stream is ECN
11064 * capable. if SYN has been acked change to ESTABLISHED else
11065 * SYN_RCVD state arrange for segment to be acked (eventually)
11066 * continue processing rest of data/controls.
11068 if ((thflags & TH_ACK) &&
11069 (SEQ_LEQ(th->th_ack, tp->iss) ||
11070 SEQ_GT(th->th_ack, tp->snd_max))) {
11071 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
11072 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11075 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
11076 TCP_PROBE5(connect__refused, NULL, tp,
11077 mtod(m, const char *), tp, th);
11078 tp = tcp_drop(tp, ECONNREFUSED);
11079 ctf_do_drop(m, tp);
11082 if (thflags & TH_RST) {
11083 ctf_do_drop(m, tp);
11086 if (!(thflags & TH_SYN)) {
11087 ctf_do_drop(m, tp);
11090 tp->irs = th->th_seq;
11091 tcp_rcvseqinit(tp);
11092 rack = (struct tcp_rack *)tp->t_fb_ptr;
11093 if (thflags & TH_ACK) {
11094 int tfo_partial = 0;
11096 KMOD_TCPSTAT_INC(tcps_connects);
11099 mac_socketpeer_set_from_mbuf(m, so);
11101 /* Do window scaling on this connection? */
11102 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
11103 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
11104 tp->rcv_scale = tp->request_r_scale;
11106 tp->rcv_adv += min(tp->rcv_wnd,
11107 TCP_MAXWIN << tp->rcv_scale);
11109 * If not all the data that was sent in the TFO SYN
11110 * has been acked, resend the remainder right away.
11112 if (IS_FASTOPEN(tp->t_flags) &&
11113 (tp->snd_una != tp->snd_max)) {
11114 tp->snd_nxt = th->th_ack;
11118 * If there's data, delay ACK; if there's also a FIN ACKNOW
11119 * will be turned on later.
11121 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) {
11122 rack_timer_cancel(tp, rack,
11123 rack->r_ctl.rc_rcvtime, __LINE__);
11124 tp->t_flags |= TF_DELACK;
11126 rack->r_wanted_output = 1;
11127 tp->t_flags |= TF_ACKNOW;
11128 rack->rc_dack_toggle = 0;
11131 tcp_ecn_input_syn_sent(tp, thflags, iptos);
11133 if (SEQ_GT(th->th_ack, tp->snd_una)) {
11135 * We advance snd_una for the
11136 * fast open case. If th_ack is
11137 * acknowledging data beyond
11138 * snd_una we can't just call
11139 * ack-processing since the
11140 * data stream in our send-map
11141 * will start at snd_una + 1 (one
11142 * beyond the SYN). If it's just
11143 * equal we don't need to do that
11144 * and there is no send_map.
11149 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions:
11150 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1
11152 tp->t_starttime = ticks;
11153 if (tp->t_flags & TF_NEEDFIN) {
11154 tcp_state_change(tp, TCPS_FIN_WAIT_1);
11155 tp->t_flags &= ~TF_NEEDFIN;
11156 thflags &= ~TH_SYN;
11158 tcp_state_change(tp, TCPS_ESTABLISHED);
11159 TCP_PROBE5(connect__established, NULL, tp,
11160 mtod(m, const char *), tp, th);
11161 rack_cc_conn_init(tp);
11165 * Received initial SYN in SYN-SENT[*] state => simultaneous
11166 * open. If segment contains CC option and there is a
11167 * cached CC, apply TAO test. If it succeeds, connection is *
11168 * half-synchronized. Otherwise, do 3-way handshake:
11169 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If
11170 * there was no CC option, clear cached CC value.
11172 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN);
11173 tcp_state_change(tp, TCPS_SYN_RECEIVED);
11176 * Advance th->th_seq to correspond to first data byte. If data,
11177 * trim to stay within window, dropping FIN if necessary.
11180 if (tlen > tp->rcv_wnd) {
11181 todrop = tlen - tp->rcv_wnd;
11183 tlen = tp->rcv_wnd;
11184 thflags &= ~TH_FIN;
11185 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin);
11186 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
11188 tp->snd_wl1 = th->th_seq - 1;
11189 tp->rcv_up = th->th_seq;
11191 * Client side of transaction: already sent SYN and data. If the
11192 * remote host used T/TCP to validate the SYN, our data will be
11193 * ACK'd; if so, enter normal data segment processing in the middle
11194 * of step 5, ack processing. Otherwise, goto step 6.
11196 if (thflags & TH_ACK) {
11197 /* For syn-sent we need to possibly update the rtt */
11198 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
11201 mcts = tcp_ts_getticks();
11202 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
11203 if (!tp->t_rttlow || tp->t_rttlow > t)
11205 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4);
11206 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2);
11207 tcp_rack_xmit_timer_commit(rack, tp);
11209 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val))
11211 /* We may have changed to FIN_WAIT_1 above */
11212 if (tp->t_state == TCPS_FIN_WAIT_1) {
11214 * In FIN_WAIT_1 STATE in addition to the processing
11215 * for the ESTABLISHED state if our FIN is now
11216 * acknowledged then enter FIN_WAIT_2.
11218 if (ourfinisacked) {
11220 * If we can't receive any more data, then
11221 * closing user can proceed. Starting the
11222 * timer is contrary to the specification,
11223 * but if we don't get a FIN we'll hang forever.
11226 * XXXjl: we should release the tp also, and
11227 * use a compressed state.
11229 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
11230 soisdisconnected(so);
11231 tcp_timer_activate(tp, TT_2MSL,
11232 (tcp_fast_finwait2_recycle ?
11233 tcp_finwait2_timeout :
11236 tcp_state_change(tp, TCPS_FIN_WAIT_2);
11240 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11241 tiwin, thflags, nxt_pkt));
11245 * Return value of 1, the TCB is unlocked and most
11246 * likely gone, return value of 0, the TCP is still locked.
11250 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
11251 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11252 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11254 struct tcp_rack *rack;
11255 int32_t ret_val = 0;
11256 int32_t ourfinisacked = 0;
11258 ctf_calc_rwin(so, tp);
11259 if ((thflags & TH_ACK) &&
11260 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
11261 SEQ_GT(th->th_ack, tp->snd_max))) {
11262 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
11263 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11266 rack = (struct tcp_rack *)tp->t_fb_ptr;
11267 if (IS_FASTOPEN(tp->t_flags)) {
11269 * When a TFO connection is in SYN_RECEIVED, the
11270 * only valid packets are the initial SYN, a
11271 * retransmit/copy of the initial SYN (possibly with
11272 * a subset of the original data), a valid ACK, a FIN, or a RST.
11275 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
11276 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
11277 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11279 } else if (thflags & TH_SYN) {
11280 /* non-initial SYN is ignored */
11281 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) ||
11282 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) ||
11283 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) {
11284 ctf_do_drop(m, NULL);
11287 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) {
11288 ctf_do_drop(m, NULL);
11293 if ((thflags & TH_RST) ||
11294 (tp->t_fin_is_rst && (thflags & TH_FIN)))
11295 return (__ctf_process_rst(m, th, so, tp,
11296 &rack->r_ctl.challenge_ack_ts,
11297 &rack->r_ctl.challenge_ack_cnt));
11299 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11300 * it's less than ts_recent, drop it.
11302 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11303 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11304 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11308 * In the SYN-RECEIVED state, validate that the packet belongs to
11309 * this connection before trimming the data to fit the receive
11310 * window. Check the sequence number versus IRS since we know the
11311 * sequence numbers haven't wrapped. This is a partial fix for the
11312 * "LAND" DoS attack.
11314 if (SEQ_LT(th->th_seq, tp->irs)) {
11315 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
11316 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11319 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11320 &rack->r_ctl.challenge_ack_ts,
11321 &rack->r_ctl.challenge_ack_cnt)) {
11325 * If last ACK falls within this segment's sequence numbers, record
11326 * its timestamp. NOTE: 1) That the test incorporates suggestions
11327 * from the latest proposal of the tcplw@cray.com list (Braden
11328 * 1993/04/26). 2) That updating only on newer timestamps interferes
11329 * with our earlier PAWS tests, so this check should be solely
11330 * predicated on the sequence space of this segment. 3) That we
11331 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11332 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11333 * SEG.Len. This modified check allows us to overcome RFC1323's
11334 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11335 * p.869. In such cases, we can still calculate the RTT correctly
11336 * when RCV.NXT == Last.ACK.Sent.
11338 if ((to->to_flags & TOF_TS) != 0 &&
11339 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11340 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11341 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11342 tp->ts_recent_age = tcp_ts_getticks();
11343 tp->ts_recent = to->to_tsval;
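	/*
	 * Illustrative sketch (hypothetical helper, not kernel code): the
	 * test above is Last.ACK.Sent <= SEG.SEQ + SEG.Len, with a SYN or
	 * FIN counting as one unit of sequence space.  Using '<=' instead of
	 * RFC1323's '<' still records the timestamp when
	 * RCV.NXT == Last.ACK.Sent, e.g. for a bare ACK whose SEG.SEQ equals
	 * the last ACK we sent.
	 */
#if 0
#include <stdint.h>

#define EX_SEQ_LEQ(a, b)	((int32_t)((a) - (b)) <= 0)	/* wrap-safe compare */

static int
should_record_ts(uint32_t last_ack_sent, uint32_t seg_seq,
    uint32_t seg_len, int has_syn_or_fin)
{
	return (EX_SEQ_LEQ(seg_seq, last_ack_sent) &&
	    EX_SEQ_LEQ(last_ack_sent, seg_seq + seg_len + (has_syn_or_fin ? 1 : 0)));
}
/* Example: seg_seq == last_ack_sent with seg_len == 0 (bare ACK) passes. */
#endif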
11345 tp->snd_wnd = tiwin;
11346 rack_validate_fo_sendwin_up(tp, rack);
11348 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11349 * is on (half-synchronized state), then queue data for later
11350 * processing; else drop segment and return.
11352 if ((thflags & TH_ACK) == 0) {
11353 if (IS_FASTOPEN(tp->t_flags)) {
11354 rack_cc_conn_init(tp);
11356 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11357 tiwin, thflags, nxt_pkt));
11359 KMOD_TCPSTAT_INC(tcps_connects);
11360 if (tp->t_flags & TF_SONOTCONN) {
11361 tp->t_flags &= ~TF_SONOTCONN;
11364 /* Do window scaling? */
11365 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
11366 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
11367 tp->rcv_scale = tp->request_r_scale;
11370 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* ->
11373 tp->t_starttime = ticks;
11374 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) {
11375 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
11376 tp->t_tfo_pending = NULL;
11378 if (tp->t_flags & TF_NEEDFIN) {
11379 tcp_state_change(tp, TCPS_FIN_WAIT_1);
11380 tp->t_flags &= ~TF_NEEDFIN;
11382 tcp_state_change(tp, TCPS_ESTABLISHED);
11383 TCP_PROBE5(accept__established, NULL, tp,
11384 mtod(m, const char *), tp, th);
11386 * TFO connections call cc_conn_init() during SYN
11387 * processing. Calling it again here for such connections
11388 * is not harmless as it would undo the snd_cwnd reduction
11389 * that occurs when a TFO SYN|ACK is retransmitted.
11391 if (!IS_FASTOPEN(tp->t_flags))
11392 rack_cc_conn_init(tp);
11395 * Account for the ACK of our SYN prior to
11396 * regular ACK processing below, except for
11397 * simultaneous SYN, which is handled later.
11399 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN))
11402 * If segment contains data or ACK, will call tcp_reass() later; if
11403 * not, do so now to pass queued data to user.
11405 if (tlen == 0 && (thflags & TH_FIN) == 0) {
11406 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0,
11408 if (tp->t_flags & TF_WAKESOR) {
11409 tp->t_flags &= ~TF_WAKESOR;
11410 /* NB: sorwakeup_locked() does an implicit unlock. */
11411 sorwakeup_locked(so);
11414 tp->snd_wl1 = th->th_seq - 1;
11415 /* For syn-recv we need to possibly update the rtt */
11416 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
11419 mcts = tcp_ts_getticks();
11420 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
11421 if (!tp->t_rttlow || tp->t_rttlow > t)
11423 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5);
11424 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2);
11425 tcp_rack_xmit_timer_commit(rack, tp);
11427 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
11430 if (tp->t_state == TCPS_FIN_WAIT_1) {
11431 /* We could have went to FIN_WAIT_1 (or EST) above */
11433 * In FIN_WAIT_1 STATE in addition to the processing for the
11434 * ESTABLISHED state if our FIN is now acknowledged then
11435 * enter FIN_WAIT_2.
11437 if (ourfinisacked) {
11439 * If we can't receive any more data, then closing
11440 * user can proceed. Starting the timer is contrary
11441 * to the specification, but if we don't get a FIN
11442 * we'll hang forever.
11444 * XXXjl: we should release the tp also, and use a
11445 * compressed state.
11447 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
11448 soisdisconnected(so);
11449 tcp_timer_activate(tp, TT_2MSL,
11450 (tcp_fast_finwait2_recycle ?
11451 tcp_finwait2_timeout :
11454 tcp_state_change(tp, TCPS_FIN_WAIT_2);
11457 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11458 tiwin, thflags, nxt_pkt));
11462 * Return value of 1, the TCB is unlocked and most
11463 * likely gone, return value of 0, the TCP is still locked.
11467 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
11468 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11469 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11471 int32_t ret_val = 0;
11472 struct tcp_rack *rack;
11475 * Header prediction: check for the two common cases of a
11476 * uni-directional data xfer. If the packet has no control flags,
11477 * is in-sequence, the window didn't change and we're not
11478 * retransmitting, it's a candidate. If the length is zero and the
11479 * ack moved forward, we're the sender side of the xfer. Just free
11480 * the data acked & wake any higher level process that was blocked
11481 * waiting for space. If the length is non-zero and the ack didn't
11482 * move, we're the receiver side. If we're getting packets in-order
11483 * (the reassembly queue is empty), add the data to the socket
11484 * buffer and note that we need a delayed ack. Make sure that the
11485 * hidden state-flags are also off. Since we check for
11486 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN.
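	/*
	 * Sketch of the fast-path split described above (illustrative,
	 * hypothetical helper): once the four __predict_true() checks below
	 * pass (no SACK option, a bare ACK, empty reassembly queue,
	 * in-sequence segment), a zero-length segment whose ACK advanced is
	 * the sender-side case (rack_fastack) and a data segment whose ACK
	 * did not move is the receiver-side case (rack_do_fastnewdata).
	 */
#if 0
#include <stdint.h>

enum ex_fastpath { EX_FP_NONE, EX_FP_SENDER_ACK, EX_FP_RECEIVER_DATA };

static enum ex_fastpath
classify_fastpath(uint32_t tlen, int ack_advanced)
{
	if (tlen == 0 && ack_advanced)
		return (EX_FP_SENDER_ACK);	/* pure ack for our outstanding data */
	if (tlen != 0 && !ack_advanced)
		return (EX_FP_RECEIVER_DATA);	/* in-order new data, no ack movement */
	return (EX_FP_NONE);			/* fall back to the slow path */
}
#endif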
11488 rack = (struct tcp_rack *)tp->t_fb_ptr;
11489 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) &&
11490 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) &&
11491 __predict_true(SEGQ_EMPTY(tp)) &&
11492 __predict_true(th->th_seq == tp->rcv_nxt)) {
11494 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen,
11495 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) {
11499 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen,
11500 tiwin, nxt_pkt, iptos)) {
11505 ctf_calc_rwin(so, tp);
11507 if ((thflags & TH_RST) ||
11508 (tp->t_fin_is_rst && (thflags & TH_FIN)))
11509 return (__ctf_process_rst(m, th, so, tp,
11510 &rack->r_ctl.challenge_ack_ts,
11511 &rack->r_ctl.challenge_ack_cnt));
11514 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11515 * synchronized state.
11517 if (thflags & TH_SYN) {
11518 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
11522 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11523 * it's less than ts_recent, drop it.
11525 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11526 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11527 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11530 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11531 &rack->r_ctl.challenge_ack_ts,
11532 &rack->r_ctl.challenge_ack_cnt)) {
11536 * If last ACK falls within this segment's sequence numbers, record
11537 * its timestamp. NOTE: 1) That the test incorporates suggestions
11538 * from the latest proposal of the tcplw@cray.com list (Braden
11539 * 1993/04/26). 2) That updating only on newer timestamps interferes
11540 * with our earlier PAWS tests, so this check should be solely
11541 * predicated on the sequence space of this segment. 3) That we
11542 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11543 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11544 * SEG.Len. This modified check allows us to overcome RFC1323's
11545 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11546 * p.869. In such cases, we can still calculate the RTT correctly
11547 * when RCV.NXT == Last.ACK.Sent.
11549 if ((to->to_flags & TOF_TS) != 0 &&
11550 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11551 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11552 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11553 tp->ts_recent_age = tcp_ts_getticks();
11554 tp->ts_recent = to->to_tsval;
11557 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11558 * is on (half-synchronized state), then queue data for later
11559 * processing; else drop segment and return.
11561 if ((thflags & TH_ACK) == 0) {
11562 if (tp->t_flags & TF_NEEDSYN) {
11563 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11564 tiwin, thflags, nxt_pkt));
11566 } else if (tp->t_flags & TF_ACKNOW) {
11567 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11568 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11571 ctf_do_drop(m, NULL);
11578 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
11581 if (sbavail(&so->so_snd)) {
11582 if (ctf_progress_timeout_check(tp, true)) {
11583 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
11584 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11588 /* State changes only happen in rack_process_data() */
11589 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11590 tiwin, thflags, nxt_pkt));
11594 * Return value of 1, the TCB is unlocked and most
11595 * likely gone, return value of 0, the TCP is still locked.
11599 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
11600 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11601 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11603 int32_t ret_val = 0;
11604 struct tcp_rack *rack;
11606 rack = (struct tcp_rack *)tp->t_fb_ptr;
11607 ctf_calc_rwin(so, tp);
11608 if ((thflags & TH_RST) ||
11609 (tp->t_fin_is_rst && (thflags & TH_FIN)))
11610 return (__ctf_process_rst(m, th, so, tp,
11611 &rack->r_ctl.challenge_ack_ts,
11612 &rack->r_ctl.challenge_ack_cnt));
11614 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11615 * synchronized state.
11617 if (thflags & TH_SYN) {
11618 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
11622 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11623 * it's less than ts_recent, drop it.
11625 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11626 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11627 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11630 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11631 &rack->r_ctl.challenge_ack_ts,
11632 &rack->r_ctl.challenge_ack_cnt)) {
11636 * If last ACK falls within this segment's sequence numbers, record
11637 * its timestamp. NOTE: 1) That the test incorporates suggestions
11638 * from the latest proposal of the tcplw@cray.com list (Braden
11639 * 1993/04/26). 2) That updating only on newer timestamps interferes
11640 * with our earlier PAWS tests, so this check should be solely
11641 * predicated on the sequence space of this segment. 3) That we
11642 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11643 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11644 * SEG.Len. This modified check allows us to overcome RFC1323's
11645 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11646 * p.869. In such cases, we can still calculate the RTT correctly
11647 * when RCV.NXT == Last.ACK.Sent.
11649 if ((to->to_flags & TOF_TS) != 0 &&
11650 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11651 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11652 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11653 tp->ts_recent_age = tcp_ts_getticks();
11654 tp->ts_recent = to->to_tsval;
11657 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11658 * is on (half-synchronized state), then queue data for later
11659 * processing; else drop segment and return.
11661 if ((thflags & TH_ACK) == 0) {
11662 if (tp->t_flags & TF_NEEDSYN) {
11663 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11664 tiwin, thflags, nxt_pkt));
11666 } else if (tp->t_flags & TF_ACKNOW) {
11667 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11668 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11671 ctf_do_drop(m, NULL);
11678 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
11681 if (sbavail(&so->so_snd)) {
11682 if (ctf_progress_timeout_check(tp, true)) {
11683 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11684 tp, tick, PROGRESS_DROP, __LINE__);
11685 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11689 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11690 tiwin, thflags, nxt_pkt));
11694 rack_check_data_after_close(struct mbuf *m,
11695 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so)
11697 struct tcp_rack *rack;
11699 rack = (struct tcp_rack *)tp->t_fb_ptr;
11700 if (rack->rc_allow_data_af_clo == 0) {
11702 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
11703 /* tcp_close will kill the inp pre-log the Reset */
11704 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
11705 tp = tcp_close(tp);
11706 KMOD_TCPSTAT_INC(tcps_rcvafterclose);
11707 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen));
11710 if (sbavail(&so->so_snd) == 0)
11712 /* Ok we allow data that is ignored and a followup reset */
11713 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
11714 tp->rcv_nxt = th->th_seq + *tlen;
11715 tp->t_flags2 |= TF2_DROP_AF_DATA;
11716 rack->r_wanted_output = 1;
11722 * Return value of 1, the TCB is unlocked and most
11723 * likely gone, return value of 0, the TCP is still locked.
11727 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
11728 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11729 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11731 int32_t ret_val = 0;
11732 int32_t ourfinisacked = 0;
11733 struct tcp_rack *rack;
11735 rack = (struct tcp_rack *)tp->t_fb_ptr;
11736 ctf_calc_rwin(so, tp);
11738 if ((thflags & TH_RST) ||
11739 (tp->t_fin_is_rst && (thflags & TH_FIN)))
11740 return (__ctf_process_rst(m, th, so, tp,
11741 &rack->r_ctl.challenge_ack_ts,
11742 &rack->r_ctl.challenge_ack_cnt));
11744 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11745 * synchronized state.
11747 if (thflags & TH_SYN) {
11748 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
11752 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11753 * it's less than ts_recent, drop it.
11755 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11756 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11757 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11760 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11761 &rack->r_ctl.challenge_ack_ts,
11762 &rack->r_ctl.challenge_ack_cnt)) {
11766 * If new data are received on a connection after the user processes
11767 * are gone, then RST the other end.
11769 if ((tp->t_flags & TF_CLOSED) && tlen &&
11770 rack_check_data_after_close(m, tp, &tlen, th, so))
11773 * If last ACK falls within this segment's sequence numbers, record
11774 * its timestamp. NOTE: 1) That the test incorporates suggestions
11775 * from the latest proposal of the tcplw@cray.com list (Braden
11776 * 1993/04/26). 2) That updating only on newer timestamps interferes
11777 * with our earlier PAWS tests, so this check should be solely
11778 * predicated on the sequence space of this segment. 3) That we
11779 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11780 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11781 * SEG.Len. This modified check allows us to overcome RFC1323's
11782 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11783 * p.869. In such cases, we can still calculate the RTT correctly
11784 * when RCV.NXT == Last.ACK.Sent.
11786 if ((to->to_flags & TOF_TS) != 0 &&
11787 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11788 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11789 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11790 tp->ts_recent_age = tcp_ts_getticks();
11791 tp->ts_recent = to->to_tsval;
11794 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11795 * is on (half-synchronized state), then queue data for later
11796 * processing; else drop segment and return.
11798 if ((thflags & TH_ACK) == 0) {
11799 if (tp->t_flags & TF_NEEDSYN) {
11800 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11801 tiwin, thflags, nxt_pkt));
11802 } else if (tp->t_flags & TF_ACKNOW) {
11803 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11804 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11807 ctf_do_drop(m, NULL);
11814 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
11817 if (ourfinisacked) {
11819 * If we can't receive any more data, then closing user can
11820 * proceed. Starting the timer is contrary to the
11821 * specification, but if we don't get a FIN we'll hang forever.
11824 * XXXjl: we should release the tp also, and use a
11825 * compressed state.
11827 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
11828 soisdisconnected(so);
11829 tcp_timer_activate(tp, TT_2MSL,
11830 (tcp_fast_finwait2_recycle ?
11831 tcp_finwait2_timeout :
11834 tcp_state_change(tp, TCPS_FIN_WAIT_2);
11836 if (sbavail(&so->so_snd)) {
11837 if (ctf_progress_timeout_check(tp, true)) {
11838 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11839 tp, tick, PROGRESS_DROP, __LINE__);
11840 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11844 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11845 tiwin, thflags, nxt_pkt));
11849 * Return value of 1, the TCB is unlocked and most
11850 * likely gone, return value of 0, the TCP is still locked.
11854 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
11855 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11856 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11858 int32_t ret_val = 0;
11859 int32_t ourfinisacked = 0;
11860 struct tcp_rack *rack;
11862 rack = (struct tcp_rack *)tp->t_fb_ptr;
11863 ctf_calc_rwin(so, tp);
11865 if ((thflags & TH_RST) ||
11866 (tp->t_fin_is_rst && (thflags & TH_FIN)))
11867 return (__ctf_process_rst(m, th, so, tp,
11868 &rack->r_ctl.challenge_ack_ts,
11869 &rack->r_ctl.challenge_ack_cnt));
11871 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11872 * synchronized state.
11874 if (thflags & TH_SYN) {
11875 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
11879 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11880 * it's less than ts_recent, drop it.
11882 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11883 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11884 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11887 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11888 &rack->r_ctl.challenge_ack_ts,
11889 &rack->r_ctl.challenge_ack_cnt)) {
11893 * If new data are received on a connection after the user processes
11894 * are gone, then RST the other end.
11896 if ((tp->t_flags & TF_CLOSED) && tlen &&
11897 rack_check_data_after_close(m, tp, &tlen, th, so))
11900 * If last ACK falls within this segment's sequence numbers, record
11901 * its timestamp. NOTE: 1) That the test incorporates suggestions
11902 * from the latest proposal of the tcplw@cray.com list (Braden
11903 * 1993/04/26). 2) That updating only on newer timestamps interferes
11904 * with our earlier PAWS tests, so this check should be solely
11905 * predicated on the sequence space of this segment. 3) That we
11906 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11907 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11908 * SEG.Len. This modified check allows us to overcome RFC1323's
11909 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11910 * p.869. In such cases, we can still calculate the RTT correctly
11911 * when RCV.NXT == Last.ACK.Sent.
11913 if ((to->to_flags & TOF_TS) != 0 &&
11914 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11915 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11916 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11917 tp->ts_recent_age = tcp_ts_getticks();
11918 tp->ts_recent = to->to_tsval;
11921 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11922 * is on (half-synchronized state), then queue data for later
11923 * processing; else drop segment and return.
11925 if ((thflags & TH_ACK) == 0) {
11926 if (tp->t_flags & TF_NEEDSYN) {
11927 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11928 tiwin, thflags, nxt_pkt));
11929 } else if (tp->t_flags & TF_ACKNOW) {
11930 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11931 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11934 ctf_do_drop(m, NULL);
11941 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
11944 if (ourfinisacked) {
11949 if (sbavail(&so->so_snd)) {
11950 if (ctf_progress_timeout_check(tp, true)) {
11951 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11952 tp, tick, PROGRESS_DROP, __LINE__);
11953 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11957 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11958 tiwin, thflags, nxt_pkt));
11962 * Return value of 1, the TCB is unlocked and most
11963 * likely gone, return value of 0, the TCP is still locked.
11967 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
11968 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11969 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11971 int32_t ret_val = 0;
11972 int32_t ourfinisacked = 0;
11973 struct tcp_rack *rack;
11975 rack = (struct tcp_rack *)tp->t_fb_ptr;
11976 ctf_calc_rwin(so, tp);
11978 if ((thflags & TH_RST) ||
11979 (tp->t_fin_is_rst && (thflags & TH_FIN)))
11980 return (__ctf_process_rst(m, th, so, tp,
11981 &rack->r_ctl.challenge_ack_ts,
11982 &rack->r_ctl.challenge_ack_cnt));
11984 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11985 * synchronized state.
11987 if (thflags & TH_SYN) {
11988 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
11992 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11993 * it's less than ts_recent, drop it.
11995 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11996 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11997 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
12000 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
12001 &rack->r_ctl.challenge_ack_ts,
12002 &rack->r_ctl.challenge_ack_cnt)) {
12006 * If new data are received on a connection after the user processes
12007 * are gone, then RST the other end.
12009 if ((tp->t_flags & TF_CLOSED) && tlen &&
12010 rack_check_data_after_close(m, tp, &tlen, th, so))
12013 * If last ACK falls within this segment's sequence numbers, record
12014 * its timestamp. NOTE: 1) That the test incorporates suggestions
12015 * from the latest proposal of the tcplw@cray.com list (Braden
12016 * 1993/04/26). 2) That updating only on newer timestamps interferes
12017 * with our earlier PAWS tests, so this check should be solely
12018 * predicated on the sequence space of this segment. 3) That we
12019 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
12020 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
12021 * SEG.Len. This modified check allows us to overcome RFC1323's
12022 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
12023 * p.869. In such cases, we can still calculate the RTT correctly
12024 * when RCV.NXT == Last.ACK.Sent.
12026 if ((to->to_flags & TOF_TS) != 0 &&
12027 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
12028 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
12029 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
12030 tp->ts_recent_age = tcp_ts_getticks();
12031 tp->ts_recent = to->to_tsval;
12034 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
12035 * is on (half-synchronized state), then queue data for later
12036 * processing; else drop segment and return.
12038 if ((thflags & TH_ACK) == 0) {
12039 if (tp->t_flags & TF_NEEDSYN) {
12040 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
12041 tiwin, thflags, nxt_pkt));
12042 } else if (tp->t_flags & TF_ACKNOW) {
12043 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
12044 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
12047 ctf_do_drop(m, NULL);
12052 * case TCPS_LAST_ACK: Ack processing.
12054 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
12057 if (ourfinisacked) {
12058 tp = tcp_close(tp);
12059 ctf_do_drop(m, tp);
12062 if (sbavail(&so->so_snd)) {
12063 if (ctf_progress_timeout_check(tp, true)) {
12064 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
12065 tp, tick, PROGRESS_DROP, __LINE__);
12066 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
12070 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
12071 tiwin, thflags, nxt_pkt));
12075 * Return value of 1, the TCB is unlocked and most
12076 * likely gone, return value of 0, the TCP is still locked.
12080 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
12081 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
12082 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
12084 int32_t ret_val = 0;
12085 int32_t ourfinisacked = 0;
12086 struct tcp_rack *rack;
12088 rack = (struct tcp_rack *)tp->t_fb_ptr;
12089 ctf_calc_rwin(so, tp);
12091 /* Reset receive buffer auto scaling when not in bulk receive mode. */
12092 if ((thflags & TH_RST) ||
12093 (tp->t_fin_is_rst && (thflags & TH_FIN)))
12094 return (__ctf_process_rst(m, th, so, tp,
12095 &rack->r_ctl.challenge_ack_ts,
12096 &rack->r_ctl.challenge_ack_cnt));
12098 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
12099 * synchronized state.
12101 if (thflags & TH_SYN) {
12102 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
12106 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
12107 * it's less than ts_recent, drop it.
12109 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
12110 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
12111 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
12114 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
12115 &rack->r_ctl.challenge_ack_ts,
12116 &rack->r_ctl.challenge_ack_cnt)) {
12120 * If new data are received on a connection after the user processes
12121 * are gone, then RST the other end.
12123 if ((tp->t_flags & TF_CLOSED) && tlen &&
12124 rack_check_data_after_close(m, tp, &tlen, th, so))
12127 * If last ACK falls within this segment's sequence numbers, record
12128 * its timestamp. NOTE: 1) That the test incorporates suggestions
12129 * from the latest proposal of the tcplw@cray.com list (Braden
12130 * 1993/04/26). 2) That updating only on newer timestamps interferes
12131 * with our earlier PAWS tests, so this check should be solely
12132 * predicated on the sequence space of this segment. 3) That we
12133 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
12134 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
12135 * SEG.Len. This modified check allows us to overcome RFC1323's
12136 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
12137 * p.869. In such cases, we can still calculate the RTT correctly
12138 * when RCV.NXT == Last.ACK.Sent.
12140 if ((to->to_flags & TOF_TS) != 0 &&
12141 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
12142 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
12143 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
12144 tp->ts_recent_age = tcp_ts_getticks();
12145 tp->ts_recent = to->to_tsval;
12148 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
12149 * is on (half-synchronized state), then queue data for later
12150 * processing; else drop segment and return.
12152 if ((thflags & TH_ACK) == 0) {
12153 if (tp->t_flags & TF_NEEDSYN) {
12154 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
12155 tiwin, thflags, nxt_pkt));
12156 } else if (tp->t_flags & TF_ACKNOW) {
12157 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
12158 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
12161 ctf_do_drop(m, NULL);
12168 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
12171 if (sbavail(&so->so_snd)) {
12172 if (ctf_progress_timeout_check(tp, true)) {
12173 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
12174 tp, tick, PROGRESS_DROP, __LINE__);
12175 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
12179 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
12180 tiwin, thflags, nxt_pkt));
12184 rack_clear_rate_sample(struct tcp_rack *rack)
12186 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY;
12187 rack->r_ctl.rack_rs.rs_rtt_cnt = 0;
12188 rack->r_ctl.rack_rs.rs_rtt_tot = 0;
12192 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override)
12194 uint64_t bw_est, rate_wanted;
12196 uint32_t user_max, orig_min, orig_max;
12198 orig_min = rack->r_ctl.rc_pace_min_segs;
12199 orig_max = rack->r_ctl.rc_pace_max_segs;
12200 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs;
12201 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs)
12203 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp);
12204 if (rack->use_fixed_rate || rack->rc_force_max_seg) {
12205 if (user_max != rack->r_ctl.rc_pace_max_segs)
12208 if (rack->rc_force_max_seg) {
12209 rack->r_ctl.rc_pace_max_segs = user_max;
12210 } else if (rack->use_fixed_rate) {
12211 bw_est = rack_get_bw(rack);
12212 if ((rack->r_ctl.crte == NULL) ||
12213 (bw_est != rack->r_ctl.crte->rate)) {
12214 rack->r_ctl.rc_pace_max_segs = user_max;
12216 /* We are pacing right at the hardware rate */
12219 segsiz = min(ctf_fixed_maxseg(tp),
12220 rack->r_ctl.rc_pace_min_segs);
12221 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(
12222 tp, bw_est, segsiz, 0,
12223 rack->r_ctl.crte, NULL);
12225 } else if (rack->rc_always_pace) {
12226 if (rack->r_ctl.gp_bw ||
12227 #ifdef NETFLIX_PEAKRATE
12228 rack->rc_tp->t_maxpeakrate ||
12230 rack->r_ctl.init_rate) {
12231 /* We have a rate of some sort set */
12234 bw_est = rack_get_bw(rack);
12235 orig = rack->r_ctl.rc_pace_max_segs;
12237 rate_wanted = *fill_override;
12239 rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL);
12241 /* We have something */
12242 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack,
12244 ctf_fixed_maxseg(rack->rc_tp));
12246 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs;
12247 if (orig != rack->r_ctl.rc_pace_max_segs)
12249 } else if ((rack->r_ctl.gp_bw == 0) &&
12250 (rack->r_ctl.rc_pace_max_segs == 0)) {
12252 * If we have nothing limit us to bursting
12253 * out IW sized pieces.
12256 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack);
12259 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) {
12261 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES;
12264 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2);
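/*
 * Illustrative sketch (simplified model with made-up names, not the body
 * of tcp_get_pacing_burst_size() or rack_get_pacing_len()): conceptually
 * the pacing burst computed above is "how many bytes fit in one pacing
 * interval at the wanted rate", rounded to whole segments and clamped,
 * much like the PACE_MAX_IP_BYTES cap at the end of the function.
 */
#if 0
#include <stdint.h>

static uint32_t
model_burst_bytes(uint64_t rate_bps, uint32_t interval_usec,
    uint32_t segsiz, uint32_t cap_bytes)
{
	uint64_t bytes;

	bytes = (rate_bps / 8) * interval_usec / 1000000;	/* bytes per interval */
	bytes -= bytes % segsiz;				/* whole segments only */
	if (bytes < segsiz)
		bytes = segsiz;					/* at least one segment */
	if (bytes > cap_bytes)
		bytes = cap_bytes;				/* e.g. PACE_MAX_IP_BYTES */
	return ((uint32_t)bytes);
}
#endif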
12269 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack)
12272 struct ip6_hdr *ip6 = NULL;
12275 struct ip *ip = NULL;
12277 struct udphdr *udp = NULL;
12279 /* Ok, let's fill in the fast block; it can only be used with no IP options! */
12281 if (rack->r_is_v6) {
12282 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
12283 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
12285 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
12286 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
12287 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
12288 udp->uh_dport = tp->t_port;
12289 rack->r_ctl.fsb.udp = udp;
12290 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
12293 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1);
12294 rack->r_ctl.fsb.udp = NULL;
12296 tcpip_fillheaders(rack->rc_inp,
12298 ip6, rack->r_ctl.fsb.th);
12303 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr);
12304 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
12306 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
12307 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
12308 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
12309 udp->uh_dport = tp->t_port;
12310 rack->r_ctl.fsb.udp = udp;
12311 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
12314 rack->r_ctl.fsb.udp = NULL;
12315 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1);
12317 tcpip_fillheaders(rack->rc_inp,
12319 ip, rack->r_ctl.fsb.th);
12322 rack->r_fsb_inited = 1;
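/*
 * Layout sketch for the pre-built fast-send block above (illustrative
 * only; fsb_tcp_offset() is a hypothetical helper, the real code lets
 * tcpip_fillheaders() populate the fields):
 *
 *	[ ip6_hdr | (udphdr) | tcphdr ]		INET6 case
 *	[ ip      | (udphdr) | tcphdr ]		INET case
 *
 * With UDP tunneling enabled the TCP header simply lands one udphdr
 * further into the same contiguous buffer.
 */
#if 0
#include <stddef.h>

static size_t
fsb_tcp_offset(size_t ip_hdr_len, int udp_tunneled, size_t udp_hdr_len)
{
	/* TCP header follows the IP header, plus the UDP header when tunneling. */
	return (ip_hdr_len + (udp_tunneled ? udp_hdr_len : 0));
}
#endif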
12326 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack)
12329 * Allocate the larger of the two header spaces: V6 if available,
12330 * else just V4, and include a udphdr (overbook).
12333 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr);
12335 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr);
12337 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len,
12338 M_TCPFSB, M_NOWAIT|M_ZERO);
12339 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) {
12342 rack->r_fsb_inited = 0;
12347 rack_init(struct tcpcb *tp)
12349 struct inpcb *inp = tptoinpcb(tp);
12350 struct tcp_rack *rack = NULL;
12352 struct rack_sendmap *insret;
12354 uint32_t iwin, snt, us_cts;
12357 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT);
12358 if (tp->t_fb_ptr == NULL) {
12360 * We need to allocate memory but cannot. The INP and INP_INFO
12361 * locks are held and they are recursive (this happens during setup),
12362 * so a scheme to drop the locks fails :(
12367 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack));
12369 rack = (struct tcp_rack *)tp->t_fb_ptr;
12370 RB_INIT(&rack->r_ctl.rc_mtree);
12371 TAILQ_INIT(&rack->r_ctl.rc_free);
12372 TAILQ_INIT(&rack->r_ctl.rc_tmap);
12374 rack->rc_inp = inp;
12376 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0;
12377 /* Probably not needed but lets be sure */
12378 rack_clear_rate_sample(rack);
12380 * Save off the default values, socket options will poke
12381 * at these if pacing is not on or we have not yet
12382 * reached where pacing is on (gp_ready/fixed enabled).
12383 * When they get set into the CC module (when gp_ready
12384 * is enabled or we enable fixed) then we will set these
12385 * values into the CC and place in here the old values
12386 * so we have a restoral. Then we will set the flag
12387 * rc_pacing_cc_set. That way whenever we turn off pacing
12388 * or switch off this stack, we will know to go restore
12389 * the saved values.
12391 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn;
12392 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn;
12393 /* We want abe like behavior as well */
12394 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED;
12395 rack->r_ctl.rc_reorder_fade = rack_reorder_fade;
12396 rack->rc_allow_data_af_clo = rack_ignore_data_after_close;
12397 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh;
12398 rack->r_ctl.roundends = tp->snd_max;
12400 rack->use_rack_rr = 1;
12401 if (V_tcp_delack_enabled)
12402 tp->t_delayed_ack = 1;
12404 tp->t_delayed_ack = 0;
12405 #ifdef TCP_ACCOUNTING
12406 if (rack_tcp_accounting) {
12407 tp->t_flags2 |= TF2_TCP_ACCOUNTING;
12410 if (rack_enable_shared_cwnd)
12411 rack->rack_enable_scwnd = 1;
12412 rack->rc_user_set_max_segs = rack_hptsi_segments;
12413 rack->rc_force_max_seg = 0;
12414 if (rack_use_imac_dack)
12415 rack->rc_dack_mode = 1;
12416 TAILQ_INIT(&rack->r_ctl.opt_list);
12417 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh;
12418 rack->r_ctl.rc_pkt_delay = rack_pkt_delay;
12419 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp;
12420 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff;
12421 rack->r_ctl.rc_highest_us_rtt = 0;
12422 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap;
12423 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop);
12424 if (rack_use_cmp_acks)
12425 rack->r_use_cmp_ack = 1;
12426 if (rack_disable_prr)
12427 rack->rack_no_prr = 1;
12428 if (rack_gp_no_rec_chg)
12429 rack->rc_gp_no_rec_chg = 1;
12430 if (rack_pace_every_seg && tcp_can_enable_pacing()) {
12431 rack->rc_always_pace = 1;
12432 if (rack->use_fixed_rate || rack->gp_ready)
12433 rack_set_cc_pacing(rack);
12435 rack->rc_always_pace = 0;
12436 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack)
12437 rack->r_mbuf_queue = 1;
12439 rack->r_mbuf_queue = 0;
12440 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
12441 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
12443 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
12444 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12445 if (rack_limits_scwnd)
12446 rack->r_limit_scw = 1;
12448 rack->r_limit_scw = 0;
12449 rack->rc_labc = V_tcp_abc_l_var;
12450 rack->r_ctl.rc_high_rwnd = tp->snd_wnd;
12451 rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
12452 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method;
12453 rack->rack_tlp_threshold_use = rack_tlp_threshold_use;
12454 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr;
12455 rack->r_ctl.rc_min_to = rack_min_to;
12456 microuptime(&rack->r_ctl.act_rcv_time);
12457 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
12458 rack->rc_init_win = rack_default_init_window;
12459 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss;
12460 if (rack_hw_up_only)
12461 rack->r_up_only = 1;
12462 if (rack_do_dyn_mul) {
12463 /* When dynamic adjustment is on CA needs to start at 100% */
12464 rack->rc_gp_dyn_mul = 1;
12465 if (rack_do_dyn_mul >= 100)
12466 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
12468 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
12469 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec;
12470 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
12471 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
12472 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN,
12473 rack_probertt_filter_life);
12474 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
12475 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
12476 rack->r_ctl.rc_time_of_last_probertt = us_cts;
12477 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks();
12478 rack->r_ctl.rc_time_probertt_starts = 0;
12479 if (rack_dsack_std_based & 0x1) {
12480 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */
12481 rack->rc_rack_tmr_std_based = 1;
12483 if (rack_dsack_std_based & 0x2) {
12484 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */
12485 rack->rc_rack_use_dsack = 1;
12487 /* We require at least one measurement, even if the sysctl is 0 */
12488 if (rack_req_measurements)
12489 rack->r_ctl.req_measurements = rack_req_measurements;
12491 rack->r_ctl.req_measurements = 1;
12492 if (rack_enable_hw_pacing)
12493 rack->rack_hdw_pace_ena = 1;
12494 if (rack_hw_rate_caps)
12495 rack->r_rack_hw_rate_caps = 1;
12496 /* Do we force on detection? */
12497 #ifdef NETFLIX_EXP_DETECTION
12498 if (tcp_force_detection)
12499 rack->do_detection = 1;
12502 rack->do_detection = 0;
12503 if (rack_non_rxt_use_cr)
12504 rack->rack_rec_nonrxt_use_cr = 1;
12505 err = rack_init_fsb(tp, rack);
12507 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
12508 tp->t_fb_ptr = NULL;
12511 if (tp->snd_una != tp->snd_max) {
12512 /* Create a send map for the current outstanding data */
12513 struct rack_sendmap *rsm;
12515 rsm = rack_alloc(rack);
12517 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
12518 tp->t_fb_ptr = NULL;
12521 rsm->r_no_rtt_allowed = 1;
12522 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
12523 rsm->r_rtr_cnt = 1;
12524 rsm->r_rtr_bytes = 0;
12525 if (tp->t_flags & TF_SENTFIN)
12526 rsm->r_flags |= RACK_HAS_FIN;
12527 if ((tp->snd_una == tp->iss) &&
12528 !TCPS_HAVEESTABLISHED(tp->t_state))
12529 rsm->r_flags |= RACK_HAS_SYN;
12530 rsm->r_start = tp->snd_una;
12531 rsm->r_end = tp->snd_max;
12533 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) {
12534 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff);
12536 rsm->orig_m_len = rsm->m->m_len;
12538 rsm->orig_m_len = 0;
12541 * This can happen if we have a stand-alone FIN or SYN.
12545 rsm->orig_m_len = 0;
12549 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
12551 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
12552 if (insret != NULL) {
12553 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p",
12554 insret, rack, rsm);
12557 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
12558 rsm->r_in_tmap = 1;
12561 * Timers in Rack are kept in microseconds so let's
12562 * convert any initial incoming variables
12563 * from ticks into usecs. Note that we
12564 * also change the values of t_srtt and t_rttvar, if
12565 * they are non-zero. They are kept with a 5
12566 * bit decimal so we have to carefully convert
12567 * these to get the full precision.
12569 rack_convert_rtts(tp);
12570 tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow);
12571 if (rack_do_hystart) {
12572 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED;
12573 if (rack_do_hystart > 1)
12574 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND;
12575 if (rack_do_hystart > 2)
12576 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH;
12578 if (rack_def_profile)
12579 rack_set_profile(rack, rack_def_profile);
12580 /* Cancel the GP measurement in progress */
12581 tp->t_flags &= ~TF_GPUTINPROG;
12582 if (SEQ_GT(tp->snd_max, tp->iss))
12583 snt = tp->snd_max - tp->iss;
12586 iwin = rc_init_window(rack);
12588 /* We are not past the initial window
12589 * so we need to make sure cwnd is right.
12592 if (tp->snd_cwnd < iwin)
12593 tp->snd_cwnd = iwin;
12595 * If we are within the initial window
12596 * we want ssthresh to be unlimited. Setting
12597 * it to the rwnd (which the default stack does
12598 * and older racks) is not really a good idea
12599 * since we want to be in SS and grow both the
12600 * cwnd and the rwnd (via dynamic rwnd growth). If
12601 * we set it to the rwnd then as the peer grows its
12602 * rwnd we will be stuck in CA and never hit SS.
12604 * It is far better to raise it up high (this takes the
12605 * risk that there has been a loss already, probably
12606 * we should have an indicator in all stacks of loss
12607 * but we don't), but considering the normal use this
12608 * is a risk worth taking. The consequences of not
12609 * hitting SS are far worse than going one more time
12610 * into it early on (before we have sent even an IW).
12611 * It is highly unlikely that we will have had a loss
12612 * before getting the IW out.
12614 tp->snd_ssthresh = 0xffffffff;
12616 rack_stop_all_timers(tp);
12617 /* Lets setup the fsb block */
12618 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
12619 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur,
12620 __LINE__, RACK_RTTS_INIT);
12625 rack_handoff_ok(struct tcpcb *tp)
12627 if ((tp->t_state == TCPS_CLOSED) ||
12628 (tp->t_state == TCPS_LISTEN)) {
12629 /* Sure no problem though it may not stick */
12632 if ((tp->t_state == TCPS_SYN_SENT) ||
12633 (tp->t_state == TCPS_SYN_RECEIVED)) {
12635 * We really don't know if you support sack,
12636 * you have to get to ESTAB or beyond to tell.
12640 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) {
12642 * Rack will only send a FIN after all data is acknowledged.
12643 * So in this case we have more data outstanding. We can't
12644 * switch stacks until either all data and only the FIN
12645 * is left (in which case rack_init() now knows how
12646 * to deal with that) <or> all is acknowledged and we
12647 * are only left with incoming data, though why you
12648 * would want to switch to rack after all data is acknowledged
12649 * I have no idea (rrs)!
12653 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){
12657 * If we reach here we don't do SACK on this connection so we can
12665 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
12667 struct inpcb *inp = tptoinpcb(tp);
12669 if (tp->t_fb_ptr) {
12670 struct tcp_rack *rack;
12671 struct rack_sendmap *rsm, *nrsm;
12673 struct rack_sendmap *rm;
12676 rack = (struct tcp_rack *)tp->t_fb_ptr;
12677 if (tp->t_in_pkt) {
12679 * It is unsafe to process the packets since a
12680 * reset may be lurking in them (its rare but it
12681 * can occur). If we were to find a RST, then we
12682 * would end up dropping the connection and the
12683 * INP lock, so when we return the caller (tcp_usrreq)
12684 * will blow up when it tries to unlock the inp.
12686 struct mbuf *save, *m;
12689 tp->t_in_pkt = NULL;
12690 tp->t_tail_pkt = NULL;
12692 save = m->m_nextpkt;
12693 m->m_nextpkt = NULL;
12698 tp->t_flags &= ~TF_FORCEDATA;
12699 #ifdef NETFLIX_SHARED_CWND
12700 if (rack->r_ctl.rc_scw) {
12703 if (rack->r_limit_scw)
12704 limit = max(1, rack->r_ctl.rc_lowest_us_rtt);
12707 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw,
12708 rack->r_ctl.rc_scw_index,
12710 rack->r_ctl.rc_scw = NULL;
12713 if (rack->r_ctl.fsb.tcp_ip_hdr) {
12714 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB);
12715 rack->r_ctl.fsb.tcp_ip_hdr = NULL;
12716 rack->r_ctl.fsb.th = NULL;
12718 /* Convert back to ticks, keeping the fractional part. */
12719 if (tp->t_srtt > 1) {
12720 uint32_t val, frac;
12722 val = USEC_2_TICKS(tp->t_srtt);
12723 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz);
12724 tp->t_srtt = val << TCP_RTT_SHIFT;
12726 * frac is the fractional part left over from converting
12727 * to hz and shifting. We need to convert this to
12728 * the 5 bit decimal.
12733 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC);
12735 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC);
12737 tp->t_srtt += frac;
12740 if (tp->t_rttvar) {
12741 uint32_t val, frac;
12743 val = USEC_2_TICKS(tp->t_rttvar);
12744 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz);
12745 tp->t_rttvar = val << TCP_RTTVAR_SHIFT;
12747 * frac is the fractional part left over from converting
12748 * to hz and shifting. We need to convert this to
12749 * the 5 bit decimal.
12754 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC);
12756 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC);
12758 tp->t_rttvar += frac;
12761 tp->t_rxtcur = USEC_2_TICKS(tp->t_rxtcur);
12762 tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow);
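	/*
	 * Worked example of the conversion above (illustrative; assumes
	 * hz == 1000 and TCP_RTT_SHIFT == 5, i.e. TCP_RTT_SCALE == 32): an
	 * srtt of 5300 usec splits into val = 5 ticks and frac = 300 usec,
	 * so the stored value is (5 << 5) + (300 * 32) / 1000 = 169, which
	 * is 5 + 9/32 ticks in the classic fixed-point form.
	 */
#if 0
#include <stdint.h>

static uint32_t
usec_to_fixed_ticks(uint32_t usec, uint32_t hz_val, uint32_t shift)
{
	uint32_t usec_per_tick = 1000000 / hz_val;
	uint32_t val = usec / usec_per_tick;		/* whole ticks */
	uint32_t frac = usec % usec_per_tick;		/* leftover microseconds */

	/* Scale the remainder into the shift-bit fractional part. */
	return ((val << shift) + ((frac << shift) / usec_per_tick));
}
#endif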
12763 if (rack->rc_always_pace) {
12764 tcp_decrement_paced_conn();
12765 rack_undo_cc_pacing(rack);
12766 rack->rc_always_pace = 0;
12768 /* Clean up any options if they were not applied */
12769 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) {
12770 struct deferred_opt_list *dol;
12772 dol = TAILQ_FIRST(&rack->r_ctl.opt_list);
12773 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
12774 free(dol, M_TCPDO);
12776 /* rack does not use force data but other stacks may clear it */
12777 if (rack->r_ctl.crte != NULL) {
12778 tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
12779 rack->rack_hdrw_pacing = 0;
12780 rack->r_ctl.crte = NULL;
12782 #ifdef TCP_BLACKBOX
12783 tcp_log_flowend(tp);
12785 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) {
12787 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
12789 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
12791 panic("At fini, rack:%p rsm:%p rm:%p",
12795 uma_zfree(rack_zone, rsm);
12797 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
12799 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
12800 uma_zfree(rack_zone, rsm);
12801 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
12803 rack->rc_free_cnt = 0;
12804 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
12805 tp->t_fb_ptr = NULL;
12807 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
12808 inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
12809 inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
12810 inp->inp_flags2 &= ~INP_MBUF_ACKCMP;
12811 /* Cancel the GP measurement in progress */
12812 tp->t_flags &= ~TF_GPUTINPROG;
12813 inp->inp_flags2 &= ~INP_MBUF_L_ACKS;
12814 /* Make sure snd_nxt is correctly set */
12815 tp->snd_nxt = tp->snd_max;
12819 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack)
12821 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) {
12822 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0;
12824 switch (tp->t_state) {
12825 case TCPS_SYN_SENT:
12826 rack->r_state = TCPS_SYN_SENT;
12827 rack->r_substate = rack_do_syn_sent;
12829 case TCPS_SYN_RECEIVED:
12830 rack->r_state = TCPS_SYN_RECEIVED;
12831 rack->r_substate = rack_do_syn_recv;
12833 case TCPS_ESTABLISHED:
12834 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12835 rack->r_state = TCPS_ESTABLISHED;
12836 rack->r_substate = rack_do_established;
12838 case TCPS_CLOSE_WAIT:
12839 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12840 rack->r_state = TCPS_CLOSE_WAIT;
12841 rack->r_substate = rack_do_close_wait;
12843 case TCPS_FIN_WAIT_1:
12844 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12845 rack->r_state = TCPS_FIN_WAIT_1;
12846 rack->r_substate = rack_do_fin_wait_1;
12849 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12850 rack->r_state = TCPS_CLOSING;
12851 rack->r_substate = rack_do_closing;
12853 case TCPS_LAST_ACK:
12854 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12855 rack->r_state = TCPS_LAST_ACK;
12856 rack->r_substate = rack_do_lastack;
12858 case TCPS_FIN_WAIT_2:
12859 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12860 rack->r_state = TCPS_FIN_WAIT_2;
12861 rack->r_substate = rack_do_fin_wait_2;
12865 case TCPS_TIME_WAIT:
12869 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
12870 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
12875 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
12878 * We received an ack, and then did not
12879	 * call send or were bounced out because the
12880	 * hpts was running. Now a timer is up as well; is
12881 * it the right timer?
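	/*
	 * Rough summary of the checks below (not exhaustive): in persist
	 * we expect PACE_TMR_PERSIT, with a delayed ack pending we expect
	 * PACE_TMR_DELACK, with nothing outstanding and keep-alives
	 * enabled we expect PACE_TMR_KEEP, and with data outstanding any
	 * of RACK, TLP or RXT is acceptable.  Anything else falls through
	 * and the timer is cancelled and restarted.
	 */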
12883 struct rack_sendmap *rsm;
12886 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
12887 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT))
12889 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
12890 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) &&
12891 (tmr_up == PACE_TMR_RXT)) {
12892 /* Should be an RXT */
12896 /* Nothing outstanding? */
12897 if (tp->t_flags & TF_DELACK) {
12898 if (tmr_up == PACE_TMR_DELACK)
12899 /* We are supposed to have delayed ack up and we do */
12901 } else if (sbavail(&tptosocket(tp)->so_snd) && (tmr_up == PACE_TMR_RXT)) {
12903 * if we hit enobufs then we would expect the possibility
12904 * of nothing outstanding and the RXT up (and the hptsi timer).
12907 } else if (((V_tcp_always_keepalive ||
12908 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
12909 (tp->t_state <= TCPS_CLOSING)) &&
12910 (tmr_up == PACE_TMR_KEEP) &&
12911 (tp->snd_max == tp->snd_una)) {
12912 /* We should have keep alive up and we do */
12916 if (SEQ_GT(tp->snd_max, tp->snd_una) &&
12917 ((tmr_up == PACE_TMR_TLP) ||
12918 (tmr_up == PACE_TMR_RACK) ||
12919 (tmr_up == PACE_TMR_RXT))) {
12921 * Either a Rack, TLP or RXT is fine if we
12922 * have outstanding data.
12925 } else if (tmr_up == PACE_TMR_DELACK) {
12927 * If the delayed ack was going to go off
12928 * before the rtx/tlp/rack timer were going to
12929 * expire, then that would be the timer in control.
12930 * Note we don't check the time here trusting the
12936	 * Ok, the timer originally started is not what we want now.
12937	 * We will force the hpts to be stopped (if it is running) and restart
12938	 * with the slot set to what was in the saved slot.
12940 if (tcp_in_hpts(rack->rc_inp)) {
12941 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
12944 us_cts = tcp_get_usecs(NULL);
12945 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
12947 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
12949 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
12951 tcp_hpts_remove(rack->rc_inp);
12953 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
12954 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
12959 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts, uint32_t high_seq)
12961 if ((SEQ_LT(tp->snd_wl1, seq) ||
12962 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) ||
12963 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) {
12964 /* keep track of pure window updates */
12965 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd))
12966 KMOD_TCPSTAT_INC(tcps_rcvwinupd);
12967 tp->snd_wnd = tiwin;
12968 rack_validate_fo_sendwin_up(tp, rack);
12971 if (tp->snd_wnd > tp->max_sndwnd)
12972 tp->max_sndwnd = tp->snd_wnd;
12973 rack->r_wanted_output = 1;
12974 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) {
12975 tp->snd_wnd = tiwin;
12976 rack_validate_fo_sendwin_up(tp, rack);
12980 /* Not a valid win update */
12983 /* Do we exit persists? */
12984 if ((rack->rc_in_persist != 0) &&
12985 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
12986 rack->r_ctl.rc_pace_min_segs))) {
12987 rack_exit_persist(tp, rack, cts);
12989 /* Do we enter persists? */
12990 if ((rack->rc_in_persist == 0) &&
12991 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
12992 TCPS_HAVEESTABLISHED(tp->t_state) &&
12993 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
12994 sbavail(&tptosocket(tp)->so_snd) &&
12995 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
12997 * Here the rwnd is less than
12998 * the pacing size, we are established,
12999 * nothing is outstanding, and there is
13000 * data to send. Enter persists.
13002 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
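	/*
	 * Illustrative numbers (not from the source): with rc_high_rwnd
	 * at 64000 bytes and rc_pace_min_segs at 1448, the threshold is
	 * min(32000, 1448) = 1448, so we enter persists only once the
	 * peer's advertised window falls below one pacing segment while
	 * un-sent data is still waiting in the socket buffer.
	 */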
13007 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq)
13010 if (tcp_bblogging_on(rack->rc_tp)) {
13011 struct inpcb *inp = tptoinpcb(tp);
13012 union tcp_log_stackspecific log;
13013 struct timeval ltv;
13014 char tcp_hdr_buf[60];
13016 struct timespec ts;
13017 uint32_t orig_snd_una;
13020 #ifdef NETFLIX_HTTP_LOGGING
13021 struct http_sendfile_track *http_req;
13023 if (SEQ_GT(ae->ack, tp->snd_una)) {
13024 http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1));
13026 http_req = tcp_http_find_req_for_seq(tp, ae->ack);
13029 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
13030 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
13031 if (rack->rack_no_prr == 0)
13032 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
13034 log.u_bbr.flex1 = 0;
13035 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
13036 log.u_bbr.use_lt_bw <<= 1;
13037 log.u_bbr.use_lt_bw |= rack->r_might_revert;
13038 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
13039 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
13040 log.u_bbr.pkts_out = tp->t_maxseg;
13041 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
13042 log.u_bbr.flex7 = 1;
13043 log.u_bbr.lost = ae->flags;
13044 log.u_bbr.cwnd_gain = ackval;
13045 log.u_bbr.pacing_gain = 0x2;
13046 if (ae->flags & TSTMP_HDWR) {
13047 /* Record the hardware timestamp if present */
13048 log.u_bbr.flex3 = M_TSTMP;
13049 ts.tv_sec = ae->timestamp / 1000000000;
13050 ts.tv_nsec = ae->timestamp % 1000000000;
13051 ltv.tv_sec = ts.tv_sec;
13052 ltv.tv_usec = ts.tv_nsec / 1000;
13053	 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
13054 } else if (ae->flags & TSTMP_LRO) {
13055	 /* Record the LRO arrival timestamp */
13056 log.u_bbr.flex3 = M_TSTMP_LRO;
13057 ts.tv_sec = ae->timestamp / 1000000000;
13058 ts.tv_nsec = ae->timestamp % 1000000000;
13059 ltv.tv_sec = ts.tv_sec;
13060 ltv.tv_usec = ts.tv_nsec / 1000;
13061	 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
13063	 log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
13064 /* Log the rcv time */
13065 log.u_bbr.delRate = ae->timestamp;
13066 #ifdef NETFLIX_HTTP_LOGGING
13067 log.u_bbr.applimited = tp->t_http_closed;
13068 log.u_bbr.applimited <<= 8;
13069 log.u_bbr.applimited |= tp->t_http_open;
13070 log.u_bbr.applimited <<= 8;
13071 log.u_bbr.applimited |= tp->t_http_req;
13073 /* Copy out any client req info */
13075 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
13077 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
13078 log.u_bbr.rttProp = http_req->timestamp;
13079 log.u_bbr.cur_del_rate = http_req->start;
13080 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
13081 log.u_bbr.flex8 |= 1;
13083 log.u_bbr.flex8 |= 2;
13084 log.u_bbr.bw_inuse = http_req->end;
13086 log.u_bbr.flex6 = http_req->start_seq;
13087 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
13088 log.u_bbr.flex8 |= 4;
13089 log.u_bbr.epoch = http_req->end_seq;
13093 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf));
13094 th = (struct tcphdr *)tcp_hdr_buf;
13095 th->th_seq = ae->seq;
13096 th->th_ack = ae->ack;
13097 th->th_win = ae->win;
13098 /* Now fill in the ports */
13099 th->th_sport = inp->inp_fport;
13100 th->th_dport = inp->inp_lport;
13101 tcp_set_flags(th, ae->flags);
13102 /* Now do we have a timestamp option? */
13103 if (ae->flags & HAS_TSTMP) {
13107 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2);
13108 cp = (u_char *)(th + 1);
13113 *cp = TCPOPT_TIMESTAMP;
13115 *cp = TCPOLEN_TIMESTAMP;
13117 val = htonl(ae->ts_value);
13118 bcopy((char *)&val,
13119 (char *)cp, sizeof(uint32_t));
13120 val = htonl(ae->ts_echo);
13121 bcopy((char *)&val,
13122 (char *)(cp + 4), sizeof(uint32_t));
13124 th->th_off = (sizeof(struct tcphdr) >> 2);
13127 * For sane logging we need to play a little trick.
13128 * If the ack were fully processed we would have moved
13129 * snd_una to high_seq, but since compressed acks are
13130 * processed in two phases, at this point (logging) snd_una
13131 * won't be advanced. So we would see multiple acks showing
13132 * the advancement. We can prevent that by "pretending" that
13133 * snd_una was advanced and then un-advancing it so that the
13134 * logging code has the right value for tlb_snd_una.
13136 if (tp->snd_una != high_seq) {
13137 orig_snd_una = tp->snd_una;
13138 tp->snd_una = high_seq;
13142 TCP_LOG_EVENTP(tp, th,
13143 &tptosocket(tp)->so_rcv,
13144 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0,
13145	 0, &log, true, &ltv);
13147 tp->snd_una = orig_snd_una;
13154 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts)
13158 * A persist or keep-alive was forced out, update our
13159	 * min rtt time. Note we now worry about lost responses.
13160 * When a subsequent keep-alive or persist times out
13161 * and forced_ack is still on, then the last probe
13162 * was not responded to. In such cases we have a
13163 * sysctl that controls the behavior. Either we apply
13164 * the rtt but with reduced confidence (0). Or we just
13165 * plain don't apply the rtt estimate. Having data flow
13166 * will clear the probe_not_answered flag i.e. cum-ack
13167 * move forward <or> exiting and reentering persists.
13170 rack->forced_ack = 0;
13171 rack->rc_tp->t_rxtshift = 0;
13172 if ((rack->rc_in_persist &&
13173 (tiwin == rack->rc_tp->snd_wnd)) ||
13174 (rack->rc_in_persist == 0)) {
13176 * In persists only apply the RTT update if this is
13177 * a response to our window probe. And that
13178 * means the rwnd sent must match the current
13179 * snd_wnd. If it does not, then we got a
13180 * window update ack instead. For keepalive
13181 * we allow the answer no matter what the window.
13183 * Note that if the probe_not_answered is set then
13184 * the forced_ack_ts is the oldest one i.e. the first
13185 * probe sent that might have been lost. This assures
13186	 * us that if we do calculate an RTT it is a longer one, not
13187	 * some artificially short one.
13189 if (rack->rc_in_persist)
13190 counter_u64_add(rack_persists_acks, 1);
13191 us_rtt = us_cts - rack->r_ctl.forced_ack_ts;
13194 if (rack->probe_not_answered == 0) {
13195 rack_apply_updated_usrtt(rack, us_rtt, us_cts);
13196 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1);
13198 /* We have a retransmitted probe here too */
13199 if (rack_apply_rtt_with_reduced_conf) {
13200 rack_apply_updated_usrtt(rack, us_rtt, us_cts);
13201 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1);
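	/*
	 * In short (summary of the branches above): an answered probe
	 * feeds the measured us_rtt into the RTT machinery with full
	 * confidence (3); a probe that had to be retransmitted
	 * (probe_not_answered) is either applied with confidence 0 or
	 * skipped entirely, depending on rack_apply_rtt_with_reduced_conf.
	 */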
13208 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv)
13211 * Handle a "special" compressed ack mbuf. Each incoming
13212 * ack has only four possible dispositions:
13214 * A) It moves the cum-ack forward
13215 * B) It is behind the cum-ack.
13216 * C) It is a window-update ack.
13217 * D) It is a dup-ack.
13219	 * Note that we can have from 1 to TCP_COMP_ACK_ENTRIES
13220 * in the incoming mbuf. We also need to still pay attention
13221 * to nxt_pkt since there may be another packet after this
13224 #ifdef TCP_ACCOUNTING
13229 struct timespec ts;
13230 struct tcp_rack *rack;
13231 struct tcp_ackent *ae;
13232 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack;
13233 int cnt, i, did_out, ourfinisacked = 0;
13234 struct tcpopt to_holder, *to = NULL;
13235 #ifdef TCP_ACCOUNTING
13236 int win_up_req = 0;
13239 int under_pacing = 1;
13241 #ifdef TCP_ACCOUNTING
13244 rack = (struct tcp_rack *)tp->t_fb_ptr;
13245 if (rack->gp_ready &&
13246 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT))
13251 if (rack->r_state != tp->t_state)
13252 rack_set_state(tp, rack);
13253 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
13254 (tp->t_flags & TF_GPUTINPROG)) {
13256 * We have a goodput in progress
13257 * and we have entered a late state.
13258 * Do we have enough data in the sb
13259 * to handle the GPUT request?
13263 bytes = tp->gput_ack - tp->gput_seq;
13264 if (SEQ_GT(tp->gput_seq, tp->snd_una))
13265 bytes += tp->gput_seq - tp->snd_una;
13266 if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
13268 * There are not enough bytes in the socket
13269 * buffer that have been sent to cover this
13270 * measurement. Cancel it.
13272 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
13273 rack->r_ctl.rc_gp_srtt /*flex1*/,
13275 0, 0, 18, __LINE__, NULL, 0);
13276 tp->t_flags &= ~TF_GPUTINPROG;
13281 KASSERT((m->m_len >= sizeof(struct tcp_ackent)),
13282 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len));
13283 cnt = m->m_len / sizeof(struct tcp_ackent);
13284 counter_u64_add(rack_multi_single_eq, cnt);
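	/*
	 * Example (illustrative): an M_ACKCMP mbuf whose m_len is five
	 * times sizeof(struct tcp_ackent) yields cnt = 5, and the loop
	 * below walks the five summarized acks in arrival order.
	 */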
13285 high_seq = tp->snd_una;
13286 the_win = tp->snd_wnd;
13287 win_seq = tp->snd_wl1;
13288 win_upd_ack = tp->snd_wl2;
13289 cts = tcp_tv_to_usectick(tv);
13290 ms_cts = tcp_tv_to_mssectick(tv);
13291 rack->r_ctl.rc_rcvtime = cts;
13292 segsiz = ctf_fixed_maxseg(tp);
13293 if ((rack->rc_gp_dyn_mul) &&
13294 (rack->use_fixed_rate == 0) &&
13295 (rack->rc_always_pace)) {
13296 /* Check in on probertt */
13297 rack_check_probe_rtt(rack, cts);
13299 for (i = 0; i < cnt; i++) {
13300 #ifdef TCP_ACCOUNTING
13301 ts_val = get_cyclecount();
13303 rack_clear_rate_sample(rack);
13304 ae = ((mtod(m, struct tcp_ackent *)) + i);
13305 /* Setup the window */
13306 tiwin = ae->win << tp->snd_scale;
13307 if (tiwin > rack->r_ctl.rc_high_rwnd)
13308 rack->r_ctl.rc_high_rwnd = tiwin;
13309 /* figure out the type of ack */
13310 if (SEQ_LT(ae->ack, high_seq)) {
13312 ae->ack_val_set = ACK_BEHIND;
13313 } else if (SEQ_GT(ae->ack, high_seq)) {
13315 ae->ack_val_set = ACK_CUMACK;
13316 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){
13318 ae->ack_val_set = ACK_DUPACK;
13321 ae->ack_val_set = ACK_RWND;
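		/*
		 * Summary of the classification above, relative to high_seq
		 * (the running cum-ack point): an ack below high_seq is
		 * ACK_BEHIND, one above it is ACK_CUMACK, an equal ack with
		 * an unchanged window (and not in persist) is ACK_DUPACK,
		 * and everything else is ACK_RWND.
		 */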
13323 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq);
13324 /* Validate timestamp */
13325 if (ae->flags & HAS_TSTMP) {
13326 /* Setup for a timestamp */
13327 to->to_flags = TOF_TS;
13328 ae->ts_echo -= tp->ts_offset;
13329 to->to_tsecr = ae->ts_echo;
13330 to->to_tsval = ae->ts_value;
13332 * If echoed timestamp is later than the current time, fall back to
13333 * non RFC1323 RTT calculation. Normalize timestamp if syncookies
13334 * were used when this connection was established.
13336 if (TSTMP_GT(ae->ts_echo, ms_cts))
13338 if (tp->ts_recent &&
13339 TSTMP_LT(ae->ts_value, tp->ts_recent)) {
13340 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) {
13341 #ifdef TCP_ACCOUNTING
13342 rdstc = get_cyclecount();
13343 if (rdstc > ts_val) {
13344 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13345 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
13352 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) &&
13353 SEQ_LEQ(tp->last_ack_sent, ae->seq)) {
13354 tp->ts_recent_age = tcp_ts_getticks();
13355 tp->ts_recent = ae->ts_value;
13358 /* Setup for a no options */
13361 /* Update the rcv time and perform idle reduction possibly */
13362 if (tp->t_idle_reduce &&
13363 (tp->snd_max == tp->snd_una) &&
13364 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
13365 counter_u64_add(rack_input_idle_reduces, 1);
13366 rack_cc_after_idle(rack, tp);
13368 tp->t_rcvtime = ticks;
13369 /* Now what about ECN of a chain of pure ACKs? */
13370 if (tcp_ecn_input_segment(tp, ae->flags, 0,
13371 tcp_packets_this_ack(tp, ae->ack),
13373 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__);
13374 #ifdef TCP_ACCOUNTING
13375 /* Count for the specific type of ack in */
13376 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13377 tp->tcp_cnt_counters[ae->ack_val_set]++;
13381	 * Note how we could move these up in the determination
13382	 * above, but we don't, so that the timestamp checks (and ECN)
13383	 * are done first, before we do any processing on the ACK.
13384 * The non-compressed path through the code has this
13385 * weakness (noted by @jtl) that it actually does some
13386 * processing before verifying the timestamp information.
13387 * We don't take that path here which is why we set
13388 * the ack_val_set first, do the timestamp and ecn
13389 * processing, and then look at what we have setup.
13391 if (ae->ack_val_set == ACK_BEHIND) {
13393 * Case B flag reordering, if window is not closed
13394 * or it could be a keep-alive or persists
13396 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) {
13397 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
13399 } else if (ae->ack_val_set == ACK_DUPACK) {
13401 rack_strike_dupack(rack);
13402 } else if (ae->ack_val_set == ACK_RWND) {
13404 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
13405 ts.tv_sec = ae->timestamp / 1000000000;
13406 ts.tv_nsec = ae->timestamp % 1000000000;
13407 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
13408 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
13410 rack->r_ctl.act_rcv_time = *tv;
13412 if (rack->forced_ack) {
13413 rack_handle_probe_response(rack, tiwin,
13414 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
13416 #ifdef TCP_ACCOUNTING
13419 win_upd_ack = ae->ack;
13422 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq);
13425 if (SEQ_GT(ae->ack, tp->snd_max)) {
13427 * We just send an ack since the incoming
13428 * ack is beyond the largest seq we sent.
13430 if ((tp->t_flags & TF_ACKNOW) == 0) {
13431 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt);
13432	 if (tp->t_flags & TF_ACKNOW)
13433 rack->r_wanted_output = 1;
13437 /* If the window changed setup to update */
13438 if (tiwin != tp->snd_wnd) {
13439 win_upd_ack = ae->ack;
13442 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq);
13444 #ifdef TCP_ACCOUNTING
13445 /* Account for the acks */
13446 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13447 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz);
13450 high_seq = ae->ack;
13451 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
13452 union tcp_log_stackspecific log;
13455 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
13456 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
13457 log.u_bbr.flex1 = high_seq;
13458 log.u_bbr.flex2 = rack->r_ctl.roundends;
13459 log.u_bbr.flex3 = rack->r_ctl.current_round;
13460 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround;
13461 log.u_bbr.flex8 = 8;
13462 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
13463 0, &log, false, NULL, NULL, 0, &tv);
13466 * The draft (v3) calls for us to use SEQ_GEQ, but that
13467	 * causes issues when we are just going app limited. Let's
13468	 * instead use SEQ_GT <or> where it's equal but more data
13471 if ((SEQ_GT(high_seq, rack->r_ctl.roundends)) ||
13472 ((high_seq == rack->r_ctl.roundends) &&
13473 SEQ_GT(tp->snd_max, tp->snd_una))) {
13474 rack->r_ctl.current_round++;
13475 rack->r_ctl.roundends = tp->snd_max;
13476 if (CC_ALGO(tp)->newround != NULL) {
13477 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round);
13480 /* Setup our act_rcv_time */
13481 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
13482 ts.tv_sec = ae->timestamp / 1000000000;
13483 ts.tv_nsec = ae->timestamp % 1000000000;
13484 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
13485 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
13487 rack->r_ctl.act_rcv_time = *tv;
13489 rack_process_to_cumack(tp, rack, ae->ack, cts, to);
13490 if (rack->rc_dsack_round_seen) {
13491 /* Is the dsack round over? */
13492 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) {
13494 rack->rc_dsack_round_seen = 0;
13495 rack_log_dsack_event(rack, 3, __LINE__, 0, 0);
13500 /* And lets be sure to commit the rtt measurements for this ack */
13501 tcp_rack_xmit_timer_commit(rack, tp);
13502 #ifdef TCP_ACCOUNTING
13503 rdstc = get_cyclecount();
13504 if (rdstc > ts_val) {
13505 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13506 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
13507 if (ae->ack_val_set == ACK_CUMACK)
13508 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val);
13513 #ifdef TCP_ACCOUNTING
13514 ts_val = get_cyclecount();
13516 /* Tend to any collapsed window */
13517 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) {
13518 /* The peer collapsed the window */
13519 rack_collapsed_window(rack, (tp->snd_max - high_seq), __LINE__);
13520 } else if (rack->rc_has_collapsed)
13521 rack_un_collapse_window(rack, __LINE__);
13522 if ((rack->r_collapse_point_valid) &&
13523 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point)))
13524 rack->r_collapse_point_valid = 0;
13525 acked_amount = acked = (high_seq - tp->snd_una);
13528 * Clear the probe not answered flag
13529 * since cum-ack moved forward.
13531 rack->probe_not_answered = 0;
13532 if (rack->sack_attack_disable == 0)
13533 rack_do_decay(rack);
13534 if (acked >= segsiz) {
13536 * You only get credit for
13537 * MSS and greater (and you get extra
13538 * credit for larger cum-ack moves).
13542 ac = acked / segsiz;
13543 rack->r_ctl.ack_count += ac;
13544 counter_u64_add(rack_ack_total, ac);
13546 if (rack->r_ctl.ack_count > 0xfff00000) {
13548 * reduce the number to keep us under
13551 rack->r_ctl.ack_count /= 2;
13552 rack->r_ctl.sack_count /= 2;
13554 if (tp->t_flags & TF_NEEDSYN) {
13556 * T/TCP: Connection was half-synchronized, and our SYN has
13557 * been ACK'd (so connection is now fully synchronized). Go
13558 * to non-starred state, increment snd_una for ACK of SYN,
13559 * and check if we can do window scaling.
13561 tp->t_flags &= ~TF_NEEDSYN;
13563 acked_amount = acked = (high_seq - tp->snd_una);
13565 if (acked > sbavail(&so->so_snd))
13566 acked_amount = sbavail(&so->so_snd);
13567 #ifdef NETFLIX_EXP_DETECTION
13569	 * We only care about a cum-ack move if we are in a sack-disabled
13570 * state. We have already added in to the ack_count, and we never
13571 * would disable on a cum-ack move, so we only care to do the
13572 * detection if it may "undo" it, i.e. we were in disabled already.
13574 if (rack->sack_attack_disable)
13575 rack_do_detection(tp, rack, acked_amount, segsiz);
13577 if (IN_FASTRECOVERY(tp->t_flags) &&
13578 (rack->rack_no_prr == 0))
13579 rack_update_prr(tp, rack, acked_amount, high_seq);
13580 if (IN_RECOVERY(tp->t_flags)) {
13581 if (SEQ_LT(high_seq, tp->snd_recover) &&
13582 (SEQ_LT(high_seq, tp->snd_max))) {
13583 tcp_rack_partialack(tp);
13585 rack_post_recovery(tp, high_seq);
13589 /* Handle the rack-log-ack part (sendmap) */
13590 if ((sbused(&so->so_snd) == 0) &&
13591 (acked > acked_amount) &&
13592 (tp->t_state >= TCPS_FIN_WAIT_1) &&
13593 (tp->t_flags & TF_SENTFIN)) {
13595 * We must be sure our fin
13596 * was sent and acked (we can be
13597 * in FIN_WAIT_1 without having
13602 * Lets make sure snd_una is updated
13603 * since most likely acked_amount = 0 (it
13606 tp->snd_una = high_seq;
13608 /* Did we make a RTO error? */
13609 if ((tp->t_flags & TF_PREVVALID) &&
13610 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
13611 tp->t_flags &= ~TF_PREVVALID;
13612 if (tp->t_rxtshift == 1 &&
13613 (int)(ticks - tp->t_badrxtwin) < 0)
13614 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__);
13616 /* Handle the data in the socket buffer */
13617 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1);
13618 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
13619 if (acked_amount > 0) {
13620 struct mbuf *mfree;
13622 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery);
13623 SOCKBUF_LOCK(&so->so_snd);
13624 mfree = sbcut_locked(&so->so_snd, acked_amount);
13625 tp->snd_una = high_seq;
13626 /* Note we want to hold the sb lock through the sendmap adjust */
13627 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una);
13628 /* Wake up the socket if we have room to write more */
13629 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
13630 sowwakeup_locked(so);
13633 /* update progress */
13634 tp->t_acktime = ticks;
13635 rack_log_progress_event(rack, tp, tp->t_acktime,
13636 PROGRESS_UPDATE, __LINE__);
13637 /* Clear out shifts and such */
13638 tp->t_rxtshift = 0;
13639 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
13640 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
13641 rack->rc_tlp_in_progress = 0;
13642 rack->r_ctl.rc_tlp_cnt_out = 0;
13643	 /* snd_recover and snd_nxt must be dragged along */
13644 if (SEQ_GT(tp->snd_una, tp->snd_recover))
13645 tp->snd_recover = tp->snd_una;
13646 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
13647 tp->snd_nxt = tp->snd_una;
13649 * If the RXT timer is running we want to
13650 * stop it, so we can restart a TLP (or new RXT).
13652 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
13653 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
13654 #ifdef NETFLIX_HTTP_LOGGING
13655 tcp_http_check_for_comp(rack->rc_tp, high_seq);
13657 tp->snd_wl2 = high_seq;
13659 if (under_pacing &&
13660 (rack->use_fixed_rate == 0) &&
13661 (rack->in_probe_rtt == 0) &&
13662 rack->rc_gp_dyn_mul &&
13663 rack->rc_always_pace) {
13664 /* Check if we are dragging bottom */
13665 rack_check_bottom_drag(tp, rack, so, acked);
13667 if (tp->snd_una == tp->snd_max) {
13668 tp->t_flags &= ~TF_PREVVALID;
13669 rack->r_ctl.retran_during_recovery = 0;
13670 rack->r_ctl.dsack_byte_cnt = 0;
13671 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
13672 if (rack->r_ctl.rc_went_idle_time == 0)
13673 rack->r_ctl.rc_went_idle_time = 1;
13674 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
13675 if (sbavail(&tptosocket(tp)->so_snd) == 0)
13677 /* Set so we might enter persists... */
13678 rack->r_wanted_output = 1;
13679 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
13680 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
13681 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
13682 (sbavail(&so->so_snd) == 0) &&
13683 (tp->t_flags2 & TF2_DROP_AF_DATA)) {
13686	 * The socket was gone and the
13687	 * peer sent data (not now, in the past), time to
13689 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
13690 /* tcp_close will kill the inp pre-log the Reset */
13691 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
13692 #ifdef TCP_ACCOUNTING
13693 rdstc = get_cyclecount();
13694 if (rdstc > ts_val) {
13695 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13696 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13697 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13702 tp = tcp_close(tp);
13704 #ifdef TCP_ACCOUNTING
13710 * We would normally do drop-with-reset which would
13711 * send back a reset. We can't since we don't have
13712 * all the needed bits. Instead lets arrange for
13713 * a call to tcp_output(). That way since we
13714 * are in the closed state we will generate a reset.
13716 * Note if tcp_accounting is on we don't unpin since
13717 * we do that after the goto label.
13719 goto send_out_a_rst;
13721 if ((sbused(&so->so_snd) == 0) &&
13722 (tp->t_state >= TCPS_FIN_WAIT_1) &&
13723 (tp->t_flags & TF_SENTFIN)) {
13725 * If we can't receive any more data, then closing user can
13726 * proceed. Starting the timer is contrary to the
13727 * specification, but if we don't get a FIN we'll hang
13731 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
13732 soisdisconnected(so);
13733 tcp_timer_activate(tp, TT_2MSL,
13734 (tcp_fast_finwait2_recycle ?
13735 tcp_finwait2_timeout :
13738 if (ourfinisacked == 0) {
13740 * We don't change to fin-wait-2 if we have our fin acked
13741 * which means we are probably in TCPS_CLOSING.
13743 tcp_state_change(tp, TCPS_FIN_WAIT_2);
13747 /* Wake up the socket if we have room to write more */
13748 if (sbavail(&so->so_snd)) {
13749 rack->r_wanted_output = 1;
13750 if (ctf_progress_timeout_check(tp, true)) {
13751 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
13752 tp, tick, PROGRESS_DROP, __LINE__);
13754 * We cheat here and don't send a RST, we should send one
13755 * when the pacer drops the connection.
13757 #ifdef TCP_ACCOUNTING
13758 rdstc = get_cyclecount();
13759 if (rdstc > ts_val) {
13760 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13761 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13762 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13767 (void)tcp_drop(tp, ETIMEDOUT);
13772 if (ourfinisacked) {
13773 switch(tp->t_state) {
13775 #ifdef TCP_ACCOUNTING
13776 rdstc = get_cyclecount();
13777 if (rdstc > ts_val) {
13778 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13779 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13780 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13789 case TCPS_LAST_ACK:
13790 #ifdef TCP_ACCOUNTING
13791 rdstc = get_cyclecount();
13792 if (rdstc > ts_val) {
13793 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13794 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13795 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13800 tp = tcp_close(tp);
13801 ctf_do_drop(m, tp);
13804 case TCPS_FIN_WAIT_1:
13805 #ifdef TCP_ACCOUNTING
13806 rdstc = get_cyclecount();
13807 if (rdstc > ts_val) {
13808 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13809 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13810 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13814 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
13815 soisdisconnected(so);
13816 tcp_timer_activate(tp, TT_2MSL,
13817 (tcp_fast_finwait2_recycle ?
13818 tcp_finwait2_timeout :
13821 tcp_state_change(tp, TCPS_FIN_WAIT_2);
13827 if (rack->r_fast_output) {
13829	 * We are doing fast output... can we expand that?
13831 rack_gain_for_fastoutput(rack, tp, so, acked_amount);
13833 #ifdef TCP_ACCOUNTING
13834 rdstc = get_cyclecount();
13835 if (rdstc > ts_val) {
13836 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13837 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13838 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13842 } else if (win_up_req) {
13843 rdstc = get_cyclecount();
13844 if (rdstc > ts_val) {
13845 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13846 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val);
13851 /* Now is there a next packet, if so we are done */
13855 #ifdef TCP_ACCOUNTING
13858 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs);
13861 rack_handle_might_revert(tp, rack);
13862 ctf_calc_rwin(so, tp);
13863 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) {
13865 if (tcp_output(tp) < 0) {
13866 #ifdef TCP_ACCOUNTING
13873 rack_free_trim(rack);
13874 #ifdef TCP_ACCOUNTING
13877 rack_timer_audit(tp, rack, &so->so_snd);
13878 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs);
13884 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
13885 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos,
13886 int32_t nxt_pkt, struct timeval *tv)
13888 struct inpcb *inp = tptoinpcb(tp);
13889 #ifdef TCP_ACCOUNTING
13892 int32_t thflags, retval, did_out = 0;
13893 int32_t way_out = 0;
13895 * cts - is the current time from tv (caller gets ts) in microseconds.
13896 * ms_cts - is the current time from tv in milliseconds.
13897 * us_cts - is the time that LRO or hardware actually got the packet in microseconds.
13899 uint32_t cts, us_cts, ms_cts;
13900 uint32_t tiwin, high_seq;
13901 struct timespec ts;
13903 struct tcp_rack *rack;
13904 struct rack_sendmap *rsm;
13905 int32_t prev_state = 0;
13906 #ifdef TCP_ACCOUNTING
13907 int ack_val_set = 0xf;
13911 NET_EPOCH_ASSERT();
13912 INP_WLOCK_ASSERT(inp);
13915 * tv passed from common code is from either M_TSTMP_LRO or
13916 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present.
13918 rack = (struct tcp_rack *)tp->t_fb_ptr;
13919 if (m->m_flags & M_ACKCMP) {
13921	 * All compressed acks are acks by definition, so
13922	 * remove any ack-required flag and then do the processing.
13924 rack->rc_ack_required = 0;
13925 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv));
13927 if (m->m_flags & M_ACKCMP) {
13928 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp);
13930 cts = tcp_tv_to_usectick(tv);
13931 ms_cts = tcp_tv_to_mssectick(tv);
13932 nsegs = m->m_pkthdr.lro_nsegs;
13933 counter_u64_add(rack_proc_non_comp_ack, 1);
13934 thflags = tcp_get_flags(th);
13935 #ifdef TCP_ACCOUNTING
13937 if (thflags & TH_ACK)
13938 ts_val = get_cyclecount();
13940 if ((m->m_flags & M_TSTMP) ||
13941 (m->m_flags & M_TSTMP_LRO)) {
13942 mbuf_tstmp2timespec(m, &ts);
13943 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
13944 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
13946 rack->r_ctl.act_rcv_time = *tv;
13947 kern_prefetch(rack, &prev_state);
13950 * Unscale the window into a 32-bit value. For the SYN_SENT state
13951 * the scale is zero.
13953 tiwin = th->th_win << tp->snd_scale;
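	/*
	 * For example (illustrative): with a negotiated snd_scale of 7 a
	 * raw th_win of 512 unscales to a 65536-byte send window; in
	 * SYN_SENT the shift is zero so th_win is used as-is.
	 */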
13954 #ifdef TCP_ACCOUNTING
13955 if (thflags & TH_ACK) {
13957 * We have a tradeoff here. We can either do what we are
13958 * doing i.e. pinning to this CPU and then doing the accounting
13959 * <or> we could do a critical enter, setup the rdtsc and cpu
13960 * as in below, and then validate we are on the same CPU on
13961	 * exit. I have chosen not to do the critical enter since
13962 * that often will gain you a context switch, and instead lock
13963 * us (line above this if) to the same CPU with sched_pin(). This
13964 * means we may be context switched out for a higher priority
13965	 * interrupt but we won't be moved to another CPU.
13967 * If this occurs (which it won't very often since we most likely
13968	 * are running this code in interrupt context and only a higher
13969	 * priority will bump us ... clock?) we will falsely add
13970	 * the interrupt processing time on top of the ack processing
13971	 * time. This is ok since it's a rare event.
13973 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin,
13974 ctf_fixed_maxseg(tp));
13978 * Parse options on any incoming segment.
13980 memset(&to, 0, sizeof(to));
13981 tcp_dooptions(&to, (u_char *)(th + 1),
13982 (th->th_off << 2) - sizeof(struct tcphdr),
13983 (thflags & TH_SYN) ? TO_SYN : 0);
13984 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
13986 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
13989 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
13990 (tp->t_flags & TF_GPUTINPROG)) {
13992 * We have a goodput in progress
13993 * and we have entered a late state.
13994 * Do we have enough data in the sb
13995 * to handle the GPUT request?
13999 bytes = tp->gput_ack - tp->gput_seq;
14000 if (SEQ_GT(tp->gput_seq, tp->snd_una))
14001 bytes += tp->gput_seq - tp->snd_una;
14002 if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
14004 * There are not enough bytes in the socket
14005 * buffer that have been sent to cover this
14006 * measurement. Cancel it.
14008 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
14009 rack->r_ctl.rc_gp_srtt /*flex1*/,
14011 0, 0, 18, __LINE__, NULL, 0);
14012 tp->t_flags &= ~TF_GPUTINPROG;
14015 high_seq = th->th_ack;
14016 if (tcp_bblogging_on(rack->rc_tp)) {
14017 union tcp_log_stackspecific log;
14018 struct timeval ltv;
14019 #ifdef NETFLIX_HTTP_LOGGING
14020 struct http_sendfile_track *http_req;
14022 if (SEQ_GT(th->th_ack, tp->snd_una)) {
14023 http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1));
14025 http_req = tcp_http_find_req_for_seq(tp, th->th_ack);
14028 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
14029 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
14030 if (rack->rack_no_prr == 0)
14031 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
14033 log.u_bbr.flex1 = 0;
14034 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
14035 log.u_bbr.use_lt_bw <<= 1;
14036 log.u_bbr.use_lt_bw |= rack->r_might_revert;
14037 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
14038 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
14039 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
14040 log.u_bbr.flex3 = m->m_flags;
14041 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
14042 log.u_bbr.lost = thflags;
14043 log.u_bbr.pacing_gain = 0x1;
14044 #ifdef TCP_ACCOUNTING
14045 log.u_bbr.cwnd_gain = ack_val_set;
14047 log.u_bbr.flex7 = 2;
14048 if (m->m_flags & M_TSTMP) {
14049 /* Record the hardware timestamp if present */
14050 mbuf_tstmp2timespec(m, &ts);
14051 ltv.tv_sec = ts.tv_sec;
14052 ltv.tv_usec = ts.tv_nsec / 1000;
14053	 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
14054 } else if (m->m_flags & M_TSTMP_LRO) {
14055	 /* Record the LRO arrival timestamp */
14056 mbuf_tstmp2timespec(m, &ts);
14057 ltv.tv_sec = ts.tv_sec;
14058 ltv.tv_usec = ts.tv_nsec / 1000;
14059	 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
14061	 log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
14062 /* Log the rcv time */
14063 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
14064 #ifdef NETFLIX_HTTP_LOGGING
14065 log.u_bbr.applimited = tp->t_http_closed;
14066 log.u_bbr.applimited <<= 8;
14067 log.u_bbr.applimited |= tp->t_http_open;
14068 log.u_bbr.applimited <<= 8;
14069 log.u_bbr.applimited |= tp->t_http_req;
14071 /* Copy out any client req info */
14073 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
14075 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
14076 log.u_bbr.rttProp = http_req->timestamp;
14077 log.u_bbr.cur_del_rate = http_req->start;
14078 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
14079 log.u_bbr.flex8 |= 1;
14081 log.u_bbr.flex8 |= 2;
14082 log.u_bbr.bw_inuse = http_req->end;
14084 log.u_bbr.flex6 = http_req->start_seq;
14085 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
14086 log.u_bbr.flex8 |= 4;
14087 log.u_bbr.epoch = http_req->end_seq;
14091 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
14092	 tlen, &log, true, &ltv);
14094 /* Remove ack required flag if set, we have one */
14095 if (thflags & TH_ACK)
14096 rack->rc_ack_required = 0;
14097 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
14101 goto done_with_input;
14104 * If a segment with the ACK-bit set arrives in the SYN-SENT state
14105 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
14107 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
14108 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
14109 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
14110 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
14111 #ifdef TCP_ACCOUNTING
14117 * If timestamps were negotiated during SYN/ACK and a
14118 * segment without a timestamp is received, silently drop
14119 * the segment, unless it is a RST segment or missing timestamps are
14121 * See section 3.2 of RFC 7323.
14123 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) &&
14124 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) {
14128 goto done_with_input;
14132 * Segment received on connection. Reset idle time and keep-alive
14133 * timer. XXX: This should be done after segment validation to
14134 * ignore broken/spoofed segs.
14136 if (tp->t_idle_reduce &&
14137 (tp->snd_max == tp->snd_una) &&
14138 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
14139 counter_u64_add(rack_input_idle_reduces, 1);
14140 rack_cc_after_idle(rack, tp);
14142 tp->t_rcvtime = ticks;
14144 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
14146 if (tiwin > rack->r_ctl.rc_high_rwnd)
14147 rack->r_ctl.rc_high_rwnd = tiwin;
14149 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move
14150 * this to occur after we've validated the segment.
14152 if (tcp_ecn_input_segment(tp, thflags, tlen,
14153 tcp_packets_this_ack(tp, th->th_ack),
14155 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__);
14158 * If echoed timestamp is later than the current time, fall back to
14159 * non RFC1323 RTT calculation. Normalize timestamp if syncookies
14160 * were used when this connection was established.
14162 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
14163 to.to_tsecr -= tp->ts_offset;
14164 if (TSTMP_GT(to.to_tsecr, ms_cts))
14169	 * If it's the first time in, we need to take care of options and
14170	 * verify we can do SACK for rack!
14172 if (rack->r_state == 0) {
14173 /* Should be init'd by rack_init() */
14174 KASSERT(rack->rc_inp != NULL,
14175 ("%s: rack->rc_inp unexpectedly NULL", __func__));
14176 if (rack->rc_inp == NULL) {
14177 rack->rc_inp = inp;
14181 * Process options only when we get SYN/ACK back. The SYN
14182 * case for incoming connections is handled in tcp_syncache.
14183 * According to RFC1323 the window field in a SYN (i.e., a
14184 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX
14185 * this is traditional behavior, may need to be cleaned up.
14187 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
14188 /* Handle parallel SYN for ECN */
14189 tcp_ecn_input_parallel_syn(tp, thflags, iptos);
14190 if ((to.to_flags & TOF_SCALE) &&
14191 (tp->t_flags & TF_REQ_SCALE)) {
14192 tp->t_flags |= TF_RCVD_SCALE;
14193 tp->snd_scale = to.to_wscale;
14195 tp->t_flags &= ~TF_REQ_SCALE;
14197 * Initial send window. It will be updated with the
14198 * next incoming segment to the scaled value.
14200 tp->snd_wnd = th->th_win;
14201 rack_validate_fo_sendwin_up(tp, rack);
14202 if ((to.to_flags & TOF_TS) &&
14203 (tp->t_flags & TF_REQ_TSTMP)) {
14204 tp->t_flags |= TF_RCVD_TSTMP;
14205 tp->ts_recent = to.to_tsval;
14206 tp->ts_recent_age = cts;
14208 tp->t_flags &= ~TF_REQ_TSTMP;
14209 if (to.to_flags & TOF_MSS) {
14210 tcp_mss(tp, to.to_mss);
14212 if ((tp->t_flags & TF_SACK_PERMIT) &&
14213 (to.to_flags & TOF_SACKPERM) == 0)
14214 tp->t_flags &= ~TF_SACK_PERMIT;
14215 if (IS_FASTOPEN(tp->t_flags)) {
14216 if (to.to_flags & TOF_FASTOPEN) {
14219 if (to.to_flags & TOF_MSS)
14222 if ((inp->inp_vflag & INP_IPV6) != 0)
14226 tcp_fastopen_update_cache(tp, mss,
14227 to.to_tfo_len, to.to_tfo_cookie);
14229 tcp_fastopen_disable_path(tp);
14233 * At this point we are at the initial call. Here we decide
14234 * if we are doing RACK or not. We do this by seeing if
14235 * TF_SACK_PERMIT is set and the sack-not-required is clear.
14236 * The code now does do dup-ack counting so if you don't
14237 * switch back you won't get rack & TLP, but you will still
14241 if ((rack_sack_not_required == 0) &&
14242 ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
14243 tcp_switch_back_to_default(tp);
14244 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen,
14246 #ifdef TCP_ACCOUNTING
14252 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack);
14254 if (thflags & TH_FIN)
14255 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN);
14256 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
14257 if ((rack->rc_gp_dyn_mul) &&
14258 (rack->use_fixed_rate == 0) &&
14259 (rack->rc_always_pace)) {
14260 /* Check in on probertt */
14261 rack_check_probe_rtt(rack, us_cts);
14263 rack_clear_rate_sample(rack);
14264 if ((rack->forced_ack) &&
14265 ((tcp_get_flags(th) & TH_RST) == 0)) {
14266 rack_handle_probe_response(rack, tiwin, us_cts);
14269 * This is the one exception case where we set the rack state
14270 * always. All other times (timers etc) we must have a rack-state
14271 * set (so we assure we have done the checks above for SACK).
14273 rack->r_ctl.rc_rcvtime = cts;
14274 if (rack->r_state != tp->t_state)
14275 rack_set_state(tp, rack);
14276 if (SEQ_GT(th->th_ack, tp->snd_una) &&
14277 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL)
14278 kern_prefetch(rsm, &prev_state);
14279 prev_state = rack->r_state;
14280 retval = (*rack->r_substate) (m, th, so,
14281 tp, &to, drop_hdrlen,
14282 tlen, tiwin, thflags, nxt_pkt, iptos);
14285 * If retval is 1 the tcb is unlocked and most likely the tp
14288 INP_WLOCK_ASSERT(inp);
14289 if ((rack->rc_gp_dyn_mul) &&
14290 (rack->rc_always_pace) &&
14291 (rack->use_fixed_rate == 0) &&
14292 rack->in_probe_rtt &&
14293 (rack->r_ctl.rc_time_probertt_starts == 0)) {
14295 * If we are going for target, lets recheck before
14298 rack_check_probe_rtt(rack, us_cts);
14300 if (rack->set_pacing_done_a_iw == 0) {
14301 /* How much has been acked? */
14302 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) {
14303 /* We have enough to set in the pacing segment size */
14304 rack->set_pacing_done_a_iw = 1;
14305 rack_set_pace_segments(tp, rack, __LINE__, NULL);
14308 tcp_rack_xmit_timer_commit(rack, tp);
14309 #ifdef TCP_ACCOUNTING
14311	 * If we set the ack_val_set to what ack processing we are doing
14312 * we also want to track how many cycles we burned. Note
14313 * the bits after tcp_output we let be "free". This is because
14314 * we are also tracking the tcp_output times as well. Note the
14315	 * use of 0xf here since we only have 11 counters (0 - 0xa) and
14316	 * 0xf cannot be returned; it is what we initialize it to, to
14317	 * indicate we are not doing the tabulations.
14319 if (ack_val_set != 0xf) {
14322 crtsc = get_cyclecount();
14323 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
14324 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val);
14328 if (nxt_pkt == 0) {
14329 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) {
14331 if (tcp_output(tp) < 0)
14335 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
14336 rack_free_trim(rack);
14338 /* Update any rounds needed */
14339 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
14340 union tcp_log_stackspecific log;
14343 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
14344 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
14345 log.u_bbr.flex1 = high_seq;
14346 log.u_bbr.flex2 = rack->r_ctl.roundends;
14347 log.u_bbr.flex3 = rack->r_ctl.current_round;
14348 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround;
14349 log.u_bbr.flex8 = 9;
14350 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
14351 0, &log, false, NULL, NULL, 0, &tv);
14354 * The draft (v3) calls for us to use SEQ_GEQ, but that
14355	 * causes issues when we are just going app limited. Let's
14356	 * instead use SEQ_GT <or> where it's equal but more data
14359 if ((SEQ_GT(tp->snd_una, rack->r_ctl.roundends)) ||
14360 ((tp->snd_una == rack->r_ctl.roundends) && SEQ_GT(tp->snd_max, tp->snd_una))) {
14361 rack->r_ctl.current_round++;
14362 rack->r_ctl.roundends = tp->snd_max;
14363 if (CC_ALGO(tp)->newround != NULL) {
14364 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round);
14367 if ((nxt_pkt == 0) &&
14368 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
14369 (SEQ_GT(tp->snd_max, tp->snd_una) ||
14370 (tp->t_flags & TF_DELACK) ||
14371 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
14372 (tp->t_state <= TCPS_CLOSING)))) {
14373 /* We could not send (probably in the hpts but stopped the timer earlier)? */
14374 if ((tp->snd_max == tp->snd_una) &&
14375 ((tp->t_flags & TF_DELACK) == 0) &&
14376 (tcp_in_hpts(rack->rc_inp)) &&
14377 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
14378	 /* keep alive not needed if we are awaiting hptsi output */
14382 if (tcp_in_hpts(inp)) {
14383 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
14384 us_cts = tcp_get_usecs(NULL);
14385 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
14387 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
14390 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
14392 tcp_hpts_remove(inp);
14394 if (late && (did_out == 0)) {
14396 * We are late in the sending
14397 * and we did not call the output
14398 * (this probably should not happen).
14400 goto do_output_now;
14402 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
14405 } else if (nxt_pkt == 0) {
14406 /* Do we have the correct timer running? */
14407 rack_timer_audit(tp, rack, &so->so_snd);
14411 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs));
14413 rack->r_wanted_output = 0;
14415 #ifdef TCP_ACCOUNTING
14422 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
14423 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos)
14427 /* First lets see if we have old packets */
14428 if (tp->t_in_pkt) {
14429 if (ctf_do_queued_segments(so, tp, 1)) {
14434 if (m->m_flags & M_TSTMP_LRO) {
14435 mbuf_tstmp2timeval(m, &tv);
14437	 /* Should not happen; should we kassert instead? */
14438 tcp_get_usecs(&tv);
14440 if (rack_do_segment_nounlock(m, th, so, tp,
14441 drop_hdrlen, tlen, iptos, 0, &tv) == 0) {
14442 INP_WUNLOCK(tptoinpcb(tp));
14446 struct rack_sendmap *
14447 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused)
14449 struct rack_sendmap *rsm = NULL;
14451 uint32_t srtt = 0, thresh = 0, ts_low = 0;
14453 /* Return the next guy to be re-transmitted */
14454 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) {
14457 if (tp->t_flags & TF_SENTFIN) {
14458 /* retran the end FIN? */
14461 /* ok lets look at this one */
14462 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
14463 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) {
14466 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) {
14469 rsm = rack_find_lowest_rsm(rack);
14474 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) &&
14475 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
14477 * No sack so we automatically do the 3 strikes and
14478 * retransmit (no rack timer would be started).
14483 if (rsm->r_flags & RACK_ACKED) {
14486 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) &&
14487 (rsm->r_dupack < DUP_ACK_THRESHOLD)) {
14488 /* Its not yet ready */
14491 srtt = rack_grab_rtt(tp, rack);
14492 idx = rsm->r_rtr_cnt - 1;
14493 ts_low = (uint32_t)rsm->r_tim_lastsent[idx];
14494 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
14495 if ((tsused == ts_low) ||
14496 (TSTMP_LT(tsused, ts_low))) {
14497 /* No time since sending */
14500 if ((tsused - ts_low) < thresh) {
14501 /* It has not been long enough yet */
14504 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
14505 ((rsm->r_flags & RACK_SACK_PASSED) &&
14506 (rack->sack_attack_disable == 0))) {
14508 * We have passed the dup-ack threshold <or>
14509 * a SACK has indicated this is missing.
14510 * Note that if you are a declared attacker
14511 * it is only the dup-ack threshold that
14512 * will cause retransmits.
14514 /* log retransmit reason */
14515 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1);
14516 rack->r_fast_output = 0;
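	/*
	 * In summary (not exhaustive): rsm is returned for retransmission
	 * only once enough time has passed since it was last sent
	 * (tsused - ts_low >= thresh, the rack reordering threshold) and
	 * it has either reached the dup-ack threshold or been passed
	 * over by a SACK.
	 */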
14523 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
14524 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method,
14525 int line, struct rack_sendmap *rsm, uint8_t quality)
14527 if (tcp_bblogging_on(rack->rc_tp)) {
14528 union tcp_log_stackspecific log;
14531 memset(&log, 0, sizeof(log));
14532 log.u_bbr.flex1 = slot;
14533 log.u_bbr.flex2 = len;
14534 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs;
14535 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs;
14536 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss;
14537 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca;
14538 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data;
14539 log.u_bbr.use_lt_bw <<= 1;
14540 log.u_bbr.use_lt_bw |= rack->r_late;
14541 log.u_bbr.use_lt_bw <<= 1;
14542 log.u_bbr.use_lt_bw |= rack->r_early;
14543 log.u_bbr.use_lt_bw <<= 1;
14544 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
14545 log.u_bbr.use_lt_bw <<= 1;
14546 log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
14547 log.u_bbr.use_lt_bw <<= 1;
14548 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
14549 log.u_bbr.use_lt_bw <<= 1;
14550 log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
14551 log.u_bbr.use_lt_bw <<= 1;
14552 log.u_bbr.use_lt_bw |= rack->gp_ready;
14553 log.u_bbr.pkt_epoch = line;
14554 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed;
14555 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early;
14556 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec;
14557 log.u_bbr.bw_inuse = bw_est;
14558 log.u_bbr.delRate = bw;
14559 if (rack->r_ctl.gp_bw == 0)
14560 log.u_bbr.cur_del_rate = 0;
14562 log.u_bbr.cur_del_rate = rack_get_bw(rack);
14563 log.u_bbr.rttProp = len_time;
14564 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt;
14565 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit;
14566 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
14567 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) {
14568 /* We are in slow start */
14569 log.u_bbr.flex7 = 1;
14571 /* we are on congestion avoidance */
14572 log.u_bbr.flex7 = 0;
14574 log.u_bbr.flex8 = method;
14575 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
14576 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
14577 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec;
14578 log.u_bbr.cwnd_gain <<= 1;
14579 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
14580 log.u_bbr.cwnd_gain <<= 1;
14581 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
14582 log.u_bbr.bbr_substate = quality;
14583 TCP_LOG_EVENTP(rack->rc_tp, NULL,
14584 &rack->rc_inp->inp_socket->so_rcv,
14585 &rack->rc_inp->inp_socket->so_snd,
14586 BBR_LOG_HPTSI_CALC, 0,
14587 0, &log, false, &tv);
14592 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss)
14594 uint32_t new_tso, user_max;
14596 user_max = rack->rc_user_set_max_segs * mss;
14597 if (rack->rc_force_max_seg) {
14600 if (rack->use_fixed_rate &&
14601 ((rack->r_ctl.crte == NULL) ||
14602 (bw != rack->r_ctl.crte->rate))) {
14603 /* Use the user mss since we are not exactly matched */
14606 new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL);
14607 if (new_tso > user_max)
14608 new_tso = user_max;
14613 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced)
14615 uint64_t lentim, fill_bw;
14617 /* Lets first see if we are full, if so continue with normal rate */
14618 rack->r_via_fill_cw = 0;
14619 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use)
14621 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd)
14623 if (rack->r_ctl.rc_last_us_rtt == 0)
14625 if (rack->rc_pace_fill_if_rttin_range &&
14626 (rack->r_ctl.rc_last_us_rtt >=
14627 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) {
14628 /* The rtt is huge, N * smallest, lets not fill */
14632 * first lets calculate the b/w based on the last us-rtt
14635 fill_bw = rack->r_ctl.cwnd_to_use;
14636 /* Take the rwnd if its smaller */
14637 if (fill_bw > rack->rc_tp->snd_wnd)
14638 fill_bw = rack->rc_tp->snd_wnd;
14639 if (rack->r_fill_less_agg) {
14641 * Now take away the inflight (this will reduce our
14642 * aggressiveness and yeah, if we get that much out in 1RTT
14643 * we will have had acks come back and still be behind).
14645 fill_bw -= ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
14647 /* Now lets make it into a b/w */
14648 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC;
14649 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt;
14650 /* We are below the min b/w */
14652 *rate_wanted = fill_bw;
14653 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted))
14655 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap))
14656 fill_bw = rack->r_ctl.bw_rate_cap;
14657 rack->r_via_fill_cw = 1;
14658 if (rack->r_rack_hw_rate_caps &&
14659 (rack->r_ctl.crte != NULL)) {
14660 uint64_t high_rate;
14662 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte);
14663 if (fill_bw > high_rate) {
14664 /* We are capping bw at the highest rate table entry */
14665 if (*rate_wanted > high_rate) {
14666 /* The original rate was also capped */
14667 rack->r_via_fill_cw = 0;
14669 rack_log_hdwr_pacing(rack,
14670 fill_bw, high_rate, __LINE__,
14672 fill_bw = high_rate;
14676 } else if ((rack->r_ctl.crte == NULL) &&
14677 (rack->rack_hdrw_pacing == 0) &&
14678 (rack->rack_hdw_pace_ena) &&
14679 rack->r_rack_hw_rate_caps &&
14680 (rack->rack_attempt_hdwr_pace == 0) &&
14681 (rack->rc_inp->inp_route.ro_nh != NULL) &&
14682 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
14684 * Ok we may have a first attempt that is greater than our top rate
14687 uint64_t high_rate;
14689 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp);
14691 if (fill_bw > high_rate) {
14692 fill_bw = high_rate;
14699 * Ok fill_bw holds our mythical b/w to fill the cwnd
14700 * in an rtt; what does that equate to, time-wise?
14702 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC;
14704 *rate_wanted = fill_bw;
14705 if (non_paced || (lentim < slot)) {
14706 rack_log_pacing_delay_calc(rack, len, slot, fill_bw,
14707 0, lentim, 12, __LINE__, NULL, 0);
14708 return ((int32_t)lentim);
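/*
 * Example of the length-to-time conversion above: pacing
 * len = 30000 bytes at fill_bw = 5,000,000 bytes/sec gives
 * lentim = 30000 * 1000000 / 5000000 = 6000 usec, which is
 * returned in place of the caller's slot when it is the smaller
 * of the two (or when we are not otherwise pacing).
 */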
14714 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz)
14718 int can_start_hw_pacing = 1;
14721 if (rack->rc_always_pace == 0) {
14723 * We use the most optimistic possible cwnd/srtt for
14724 * sending calculations. This will make our
14725 * calculation anticipate getting more through
14726 * quicker than possible. But that's OK, we don't want
14727 * the peer to have a gap in data sending.
14729 uint64_t cwnd, tr_perms = 0;
14730 int32_t reduce = 0;
14734 * We keep no precise pacing with the old method;
14735 * instead we use the pacer to mitigate bursts.
14737 if (rack->r_ctl.rc_rack_min_rtt)
14738 srtt = rack->r_ctl.rc_rack_min_rtt;
14740 srtt = max(tp->t_srtt, 1);
14741 if (rack->r_ctl.rc_rack_largest_cwnd)
14742 cwnd = rack->r_ctl.rc_rack_largest_cwnd;
14744 cwnd = rack->r_ctl.cwnd_to_use;
14745 /* Inflate cwnd by 1000 so srtt of usecs is in ms */
14746 tr_perms = (cwnd * 1000) / srtt;
14747 if (tr_perms == 0) {
14748 tr_perms = ctf_fixed_maxseg(tp);
14751 * Calculate how long this will take to drain. If
14752 * the calculation comes out to zero, that's ok; we
14753 * will use send_a_lot to possibly spin around for
14754 * more, increasing tot_len_this_send to the point
14755 * that it's going to require a pace, or we hit the
14756 * cwnd. In that case we are just waiting for
14759 slot = len / tr_perms;
14760 /* Now do we reduce the time so we don't run dry? */
14761 if (slot && rack_slot_reduction) {
14762 reduce = (slot / rack_slot_reduction);
14763 if (reduce < slot) {
14768 slot *= HPTS_USEC_IN_MSEC;
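/*
 * Worked example for the legacy path above: with cwnd = 50000
 * bytes and srtt = 40000 usec, tr_perms = 50000 * 1000 / 40000 =
 * 1250 bytes per msec. Sending len = 5000 bytes then yields
 * slot = 5000 / 1250 = 4 msec (ignoring any rack_slot_reduction
 * adjustment), converted just above to 4000 usec.
 */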
14769 if (rack->rc_pace_to_cwnd) {
14770 uint64_t rate_wanted = 0;
14772 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1);
14773 rack->rc_ack_can_sendout_data = 1;
14774 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0);
14776 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0);
14778 uint64_t bw_est, res, lentim, rate_wanted;
14779 uint32_t orig_val, segs, oh;
14783 if ((rack->r_rr_config == 1) && rsm) {
14784 return (rack->r_ctl.rc_min_to);
14786 if (rack->use_fixed_rate) {
14787 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack);
14788 } else if ((rack->r_ctl.init_rate == 0) &&
14789 #ifdef NETFLIX_PEAKRATE
14790 (rack->rc_tp->t_maxpeakrate == 0) &&
14792 (rack->r_ctl.gp_bw == 0)) {
14793 /* no way to yet do an estimate */
14794 bw_est = rate_wanted = 0;
14796 bw_est = rack_get_bw(rack);
14797 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped);
14799 if ((bw_est == 0) || (rate_wanted == 0) ||
14800 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) {
14802 * No way yet to make a b/w estimate or
14803 * our rate is set incorrectly.
14807 /* We need to account for all the overheads */
14808 segs = (len + segsiz - 1) / segsiz;
14810 * We need the diff between 1514 bytes (e-mtu with e-hdr)
14811 * and how much data we put in each packet. Yes this
14812 * means we may be off if we are larger than 1500 bytes
14813 * or smaller. But this just makes us more conservative.
14815 if (rack_hw_rate_min &&
14816 (bw_est < rack_hw_rate_min))
14817 can_start_hw_pacing = 0;
14818 if (ETHERNET_SEGMENT_SIZE > segsiz)
14819 oh = ETHERNET_SEGMENT_SIZE - segsiz;
14823 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC;
14824 res = lentim / rate_wanted;
14825 slot = (uint32_t)res;
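/*
 * For instance, if the byte count used above comes to 15000
 * bytes and rate_wanted = 2,500,000 bytes/sec, then
 * lentim = 15000 * 1000000 = 15,000,000,000 and
 * slot = lentim / 2,500,000 = 6000 usec of pacing delay.
 */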
14826 orig_val = rack->r_ctl.rc_pace_max_segs;
14827 if (rack->r_ctl.crte == NULL) {
14829 * Only do this if we are not hardware pacing
14830 * since if we are doing hw-pacing below we will
14831 * make a call after setting up or changing
14834 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
14835 } else if (rack->rc_inp->inp_snd_tag == NULL) {
14837 * We lost our rate somehow, this can happen
14838 * if the interface changed underneath us.
14840 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
14841 rack->r_ctl.crte = NULL;
14842 /* Lets re-allow attempting to setup pacing */
14843 rack->rack_hdrw_pacing = 0;
14844 rack->rack_attempt_hdwr_pace = 0;
14845 rack_log_hdwr_pacing(rack,
14846 rate_wanted, bw_est, __LINE__,
14849 /* Did we change the TSO size, if so log it */
14850 if (rack->r_ctl.rc_pace_max_segs != orig_val)
14851 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL, 0);
14852 prev_fill = rack->r_via_fill_cw;
14853 if ((rack->rc_pace_to_cwnd) &&
14855 (rack->use_fixed_rate == 0) &&
14856 (rack->in_probe_rtt == 0) &&
14857 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) {
14859 * We want to pace at our rate *or* faster to
14860 * fill the cwnd to the max if it's not full.
14862 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0);
14864 if ((rack->rc_inp->inp_route.ro_nh != NULL) &&
14865 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
14866 if ((rack->rack_hdw_pace_ena) &&
14867 (can_start_hw_pacing > 0) &&
14868 (rack->rack_hdrw_pacing == 0) &&
14869 (rack->rack_attempt_hdwr_pace == 0)) {
14871 * Let's attempt to turn on hardware pacing
14874 rack->rack_attempt_hdwr_pace = 1;
14875 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp,
14876 rack->rc_inp->inp_route.ro_nh->nh_ifp,
14879 &err, &rack->r_ctl.crte_prev_rate);
14880 if (rack->r_ctl.crte) {
14881 rack->rack_hdrw_pacing = 1;
14882 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, segsiz,
14883 0, rack->r_ctl.crte,
14885 rack_log_hdwr_pacing(rack,
14886 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
14888 rack->r_ctl.last_hw_bw_req = rate_wanted;
14890 counter_u64_add(rack_hw_pace_init_fail, 1);
14892 } else if (rack->rack_hdrw_pacing &&
14893 (rack->r_ctl.last_hw_bw_req != rate_wanted)) {
14894 /* Do we need to adjust our rate? */
14895 const struct tcp_hwrate_limit_table *nrte;
14897 if (rack->r_up_only &&
14898 (rate_wanted < rack->r_ctl.crte->rate)) {
14900 * We have four possible states here
14901 * having to do with the previous time
14903 * previous | this-time
14904 * A) 0 | 0 -- fill_cw not in the picture
14905 * B) 1 | 0 -- we were doing a fill-cw but now are not
14906 * C) 1 | 1 -- all rates from fill_cw
14907 * D) 0 | 1 -- we were doing non-fill and now we are filling
14909 * For case A, C and D we don't allow a drop. But for
14910 * case B, where we are now on our steady rate, we do
14914 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0)))
14917 if ((rate_wanted > rack->r_ctl.crte->rate) ||
14918 (rate_wanted <= rack->r_ctl.crte_prev_rate)) {
14919 if (rack_hw_rate_to_low &&
14920 (bw_est < rack_hw_rate_to_low)) {
14922 * The pacing rate is too low for hardware, but
14923 * do allow hardware pacing to be restarted.
14925 rack_log_hdwr_pacing(rack,
14926 bw_est, rack->r_ctl.crte->rate, __LINE__,
14928 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
14929 rack->r_ctl.crte = NULL;
14930 rack->rack_attempt_hdwr_pace = 0;
14931 rack->rack_hdrw_pacing = 0;
14932 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
14935 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte,
14937 rack->rc_inp->inp_route.ro_nh->nh_ifp,
14940 &err, &rack->r_ctl.crte_prev_rate);
14941 if (nrte == NULL) {
14942 /* Lost the rate */
14943 rack->rack_hdrw_pacing = 0;
14944 rack->r_ctl.crte = NULL;
14945 rack_log_hdwr_pacing(rack,
14946 rate_wanted, 0, __LINE__,
14948 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
14949 counter_u64_add(rack_hw_pace_lost, 1);
14950 } else if (nrte != rack->r_ctl.crte) {
14951 rack->r_ctl.crte = nrte;
14952 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted,
14956 rack_log_hdwr_pacing(rack,
14957 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
14959 rack->r_ctl.last_hw_bw_req = rate_wanted;
14962 /* We just need to adjust the segment size */
14963 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
14964 rack_log_hdwr_pacing(rack,
14965 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
14967 rack->r_ctl.last_hw_bw_req = rate_wanted;
14971 if ((rack->r_ctl.crte != NULL) &&
14972 (rack->r_ctl.crte->rate == rate_wanted)) {
14974 * We need to add an extra if the rates
14975 * are exactly matched. The idea is
14976 * we want the software to make sure the
14977 * queue is empty before adding more, this
14978 * gives us N MSS extra pace times where
14981 slot += (rack->r_ctl.crte->time_between * rack_hw_pace_extra_slots);
14984 if (rack_limit_time_with_srtt &&
14985 (rack->use_fixed_rate == 0) &&
14986 #ifdef NETFLIX_PEAKRATE
14987 (rack->rc_tp->t_maxpeakrate == 0) &&
14989 (rack->rack_hdrw_pacing == 0)) {
14991 * Sanity check, we do not allow the pacing delay
14992 * to be longer than the SRTT of the path. If it is
14993 * a slow path, then adding a packet should increase
14994 * the RTT and compensate for this i.e. the srtt will
14995 * be greater so the allowed pacing time will be greater.
14997 * Note this restriction does not apply where a peak rate
14998 * is set, or where we are doing fixed pacing or hardware pacing.
15000 if (rack->rc_tp->t_srtt)
15001 srtt = rack->rc_tp->t_srtt;
15003 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* it's in ms, convert to usec */
15004 if (srtt < (uint64_t)slot) {
15005 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0);
15009 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0);
15011 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) {
15013 * If this rate is seeing enobufs when it
15014 * goes to send then either the nic is out
15015 * of gas or we are mis-estimating the time
15016 * somehow and not letting the queue empty
15017 * completely. Let's add to the pacing time.
15019 int hw_boost_delay;
15021 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult;
15022 if (hw_boost_delay > rack_enobuf_hw_max)
15023 hw_boost_delay = rack_enobuf_hw_max;
15024 else if (hw_boost_delay < rack_enobuf_hw_min)
15025 hw_boost_delay = rack_enobuf_hw_min;
15026 slot += hw_boost_delay;
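/*
 * E.g., if the hardware queue spacing (crte->time_between) is
 * 100 usec and the boost multiplier is 2, then 200 usec is added
 * to the pacing slot, after being clamped to the min/max bounds
 * checked just above.
 */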
15032 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack,
15033 tcp_seq startseq, uint32_t sb_offset)
15035 struct rack_sendmap *my_rsm = NULL;
15036 struct rack_sendmap fe;
15038 if (tp->t_state < TCPS_ESTABLISHED) {
15040 * We don't start any measurements if we are
15041 * not at least established.
15045 if (tp->t_state >= TCPS_FIN_WAIT_1) {
15047 * We will get no more data into the SB
15048 * this means we need to have the data available
15049 * before we start a measurement.
15052 if (sbavail(&tptosocket(tp)->so_snd) <
15053 max(rc_init_window(rack),
15054 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) {
15055 /* Nope not enough data */
15059 tp->t_flags |= TF_GPUTINPROG;
15060 rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
15061 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
15062 tp->gput_seq = startseq;
15063 rack->app_limited_needs_set = 0;
15064 if (rack->in_probe_rtt)
15065 rack->measure_saw_probe_rtt = 1;
15066 else if ((rack->measure_saw_probe_rtt) &&
15067 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
15068 rack->measure_saw_probe_rtt = 0;
15069 if (rack->rc_gp_filled)
15070 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
15072 /* Special case initial measurement */
15075 tp->gput_ts = tcp_get_usecs(&tv);
15076 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
15079 * We take a guess out into the future,
15080 * if we have no measurement and no
15081 * initial rate, we measure the first
15082 * initial-windows worth of data to
15083 * speed up getting some GP measurement and
15084 * thus start pacing.
15086 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) {
15087 rack->app_limited_needs_set = 1;
15088 tp->gput_ack = startseq + max(rc_init_window(rack),
15089 (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
15090 rack_log_pacing_delay_calc(rack,
15095 rack->r_ctl.rc_app_limited_cnt,
15097 __LINE__, NULL, 0);
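/*
 * E.g., assuming a 10-segment initial window and a 1460-byte
 * maxseg, this first measurement ends once roughly
 * startseq + 14600 bytes have been acked (or the MIN_GP_WIN
 * floor, whichever is larger).
 */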
15102 * We are out somewhere in the sb;
15103 * can we use the already outstanding data?
15105 if (rack->r_ctl.rc_app_limited_cnt == 0) {
15107 * Yes first one is good and in this case
15108 * the tp->gput_ts is correctly set based on
15109 * the last ack that arrived (no need to
15110 * set things up when an ack comes in).
15112 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
15113 if ((my_rsm == NULL) ||
15114 (my_rsm->r_rtr_cnt != 1)) {
15115 /* retransmission? */
15119 if (rack->r_ctl.rc_first_appl == NULL) {
15121 * If rc_first_appl is NULL
15122 * then the cnt should be 0.
15123 * This is probably an error, maybe
15124 * a KASSERT would be appropriate.
15129 * If we have a marker pointer to the last one that is
15130 * app limited we can use that, but we need to set
15131 * things up so that when it gets ack'ed we record
15132 * the ack time (if its not already acked).
15134 rack->app_limited_needs_set = 1;
15136 * We want to get to the rsm that is either
15137 * next with space i.e. over 1 MSS or the one
15138 * after that (after the app-limited).
15140 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
15141 rack->r_ctl.rc_first_appl);
15143 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp))
15144 /* Have to use the next one */
15145 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
15148 /* Use after the first MSS of it is acked */
15149 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp);
15153 if ((my_rsm == NULL) ||
15154 (my_rsm->r_rtr_cnt != 1)) {
15156 * Either its a retransmit or
15157 * the last is the app-limited one.
15162 tp->gput_seq = my_rsm->r_start;
15164 if (my_rsm->r_flags & RACK_ACKED) {
15166 * This one has been acked use the arrival ack time
15168 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
15169 rack->app_limited_needs_set = 0;
15171 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)];
15172 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
15173 rack_log_pacing_delay_calc(rack,
15178 rack->r_ctl.rc_app_limited_cnt,
15180 __LINE__, NULL, 0);
15186 * We don't know how long we may have been
15187 * idle or if this is the first-send. Let's
15188 * set up the flag so we will trim off
15189 * the first ack'd data so we get a true
15192 rack->app_limited_needs_set = 1;
15193 tp->gput_ack = startseq + rack_get_measure_window(tp, rack);
15194 /* Find this guy so we can pull the send time */
15195 fe.r_start = startseq;
15196 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
15198 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)];
15199 if (my_rsm->r_flags & RACK_ACKED) {
15201 * Unlikely since it's probably what was
15202 * just transmitted (but I am paranoid).
15204 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
15205 rack->app_limited_needs_set = 0;
15207 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) {
15208 /* This also is unlikely */
15209 tp->gput_seq = my_rsm->r_start;
15213 * TSNH unless we have some send-map limit,
15214 * and even at that it should not be hitting
15215 * that limit (we should have stopped sending).
15220 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
15222 rack_log_pacing_delay_calc(rack,
15227 rack->r_ctl.rc_app_limited_cnt,
15228 9, __LINE__, NULL, 0);
15231 static inline uint32_t
15232 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use,
15233 uint32_t avail, int32_t sb_offset)
15238 if (tp->snd_wnd > cwnd_to_use)
15239 sendwin = cwnd_to_use;
15241 sendwin = tp->snd_wnd;
15242 if (ctf_outstanding(tp) >= tp->snd_wnd) {
15243 /* We never want to go over our peer's rcv-window */
15248 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
15249 if (flight >= sendwin) {
15251 * We have in flight what we are allowed by cwnd (if
15252 * it was rwnd blocking it would have hit the rwnd check above).
15257 len = sendwin - flight;
15258 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) {
15259 /* We would send too much (beyond the rwnd) */
15260 len = tp->snd_wnd - ctf_outstanding(tp);
15262 if ((len + sb_offset) > avail) {
15264 * We don't have that much in the SB, how much is
15267 len = avail - sb_offset;
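/*
 * Putting the steps above together with example numbers:
 * cwnd_to_use = 40000 and snd_wnd = 60000 gives sendwin = 40000;
 * with 25000 bytes already in flight, len = 15000. That value is
 * then trimmed so the total outstanding never exceeds the peer's
 * receive window, and so we never ask for more than is actually
 * sitting in the sb.
 */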
15274 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags,
15275 unsigned ipoptlen, int32_t orig_len, int32_t len, int error,
15276 int rsm_is_null, int optlen, int line, uint16_t mode)
15278 if (tcp_bblogging_on(rack->rc_tp)) {
15279 union tcp_log_stackspecific log;
15282 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
15283 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
15284 log.u_bbr.flex1 = error;
15285 log.u_bbr.flex2 = flags;
15286 log.u_bbr.flex3 = rsm_is_null;
15287 log.u_bbr.flex4 = ipoptlen;
15288 log.u_bbr.flex5 = tp->rcv_numsacks;
15289 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
15290 log.u_bbr.flex7 = optlen;
15291 log.u_bbr.flex8 = rack->r_fsb_inited;
15292 log.u_bbr.applimited = rack->r_fast_output;
15293 log.u_bbr.bw_inuse = rack_get_bw(rack);
15294 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
15295 log.u_bbr.cwnd_gain = mode;
15296 log.u_bbr.pkts_out = orig_len;
15297 log.u_bbr.lt_epoch = len;
15298 log.u_bbr.delivered = line;
15299 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
15300 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
15301 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0,
15302 len, &log, false, NULL, NULL, 0, &tv);
15307 static struct mbuf *
15308 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen,
15309 struct rack_fast_send_blk *fsb,
15310 int32_t seglimit, int32_t segsize, int hw_tls)
15313 struct ktls_session *tls, *ntls;
15315 struct mbuf *start;
15318 struct mbuf *m, *n, **np, *smb;
15321 int32_t len = *plen;
15323 int32_t len_cp = 0;
15324 uint32_t mlen, frags;
15326 soff = off = the_off;
15331 if (hw_tls && (m->m_flags & M_EXTPG))
15332 tls = m->m_epg_tls;
15346 if (m->m_flags & M_EXTPG)
15347 ntls = m->m_epg_tls;
15352 * Avoid mixing TLS records with handshake
15353 * data or TLS records from different
15363 mlen = min(len, m->m_len - off);
15366 * For M_EXTPG mbufs, add 3 segments
15367 * + 1 in case we are crossing page boundaries
15368 * + 2 in case the TLS hdr/trailer are used
15369 * It is cheaper to just add the segments
15370 * than it is to take the cache miss to look
15371 * at the mbuf ext_pgs state in detail.
15373 if (m->m_flags & M_EXTPG) {
15374 fragsize = min(segsize, PAGE_SIZE);
15377 fragsize = segsize;
15381 /* Break if we really can't fit anymore. */
15382 if ((frags + 1) >= seglimit) {
15388 * Reduce size if you can't copy the whole
15389 * mbuf. If we can't copy the whole mbuf, also
15390 * adjust len so the loop will end after this
15393 if ((frags + howmany(mlen, fragsize)) >= seglimit) {
15394 mlen = (seglimit - frags - 1) * fragsize;
15396 *plen = len_cp + len;
15398 frags += howmany(mlen, fragsize);
15402 KASSERT(seglimit > 0,
15403 ("%s: seglimit went too low", __func__));
15405 n = m_get(M_NOWAIT, m->m_type);
15411 len_cp += n->m_len;
15412 if (m->m_flags & (M_EXT|M_EXTPG)) {
15413 n->m_data = m->m_data + off;
15416 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
15423 if (len || (soff == smb->m_len)) {
15425 * We have more so we move forward or
15426 * we have consumed the entire mbuf and
15427 * len has fallen to 0.
15439 * Save off the size of the mbuf. We do
15440 * this so that we can recognize when it
15441 * has been trimmed by sbcut() as acks
15444 fsb->o_m_len = smb->m_len;
15447 * This is the case where the next mbuf went to NULL. This
15448 * means with this copy we have sent everything in the sb.
15449 * In theory we could clear the fast_output flag, but let's
15450 * not, since it's possible that we could get more added
15451 * and acks that call the extend function which would let
15466 * This is a copy of m_copym(), taking the TSO segment size/limit
15467 * constraints into account, and advancing the sndptr as it goes.
15469 static struct mbuf *
15470 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen,
15471 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff)
15473 struct mbuf *m, *n;
15476 soff = rack->r_ctl.fsb.off;
15477 m = rack->r_ctl.fsb.m;
15478 if (rack->r_ctl.fsb.o_m_len > m->m_len) {
15480 * The mbuf had the front of it chopped off by an ack;
15481 * we need to adjust the soff/off by that difference.
15485 delta = rack->r_ctl.fsb.o_m_len - m->m_len;
15487 } else if (rack->r_ctl.fsb.o_m_len < m->m_len) {
15489 * The mbuf was expanded probably by
15490 * a m_compress. Just update o_m_len.
15492 rack->r_ctl.fsb.o_m_len = m->m_len;
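/*
 * For example, if o_m_len was recorded as 4096 and an ack has
 * since let sbcut() trim the mbuf down to 3000 bytes, the delta
 * is 1096 and the saved offset is pulled back by that amount so
 * it still refers to the same byte in the stream.
 */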
15494 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff));
15495 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen));
15496 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?",
15498 rack, *plen, m, m->m_len));
15499 /* Save off the right location before we copy and advance */
15501 *s_mb = rack->r_ctl.fsb.m;
15502 n = rack_fo_base_copym(m, soff, plen,
15504 seglimit, segsize, rack->r_ctl.fsb.hw_tls);
15509 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
15510 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp)
15513 * Enter the fast retransmit path. We are given that a sched_pin is
15514 * in place (if accounting is compiled in) and the cycle count taken
15515 * at the entry is in ts_val. The concept here is that the rsm
15516 * now holds the mbuf offsets and such so we can directly transmit
15517 * without a lot of overhead; the len field is already set for
15518 * us to prohibit us from sending too much (usually it's 1 MSS).
15520 struct ip *ip = NULL;
15521 struct udphdr *udp = NULL;
15522 struct tcphdr *th = NULL;
15523 struct mbuf *m = NULL;
15526 struct tcp_log_buffer *lgb;
15527 #ifdef TCP_ACCOUNTING
15532 u_char opt[TCP_MAXOLEN];
15533 uint32_t hdrlen, optlen;
15534 int32_t slot, segsiz, max_val, tso = 0, error = 0, ulen = 0;
15536 uint32_t if_hw_tsomaxsegcount = 0, startseq;
15537 uint32_t if_hw_tsomaxsegsize;
15540 struct ip6_hdr *ip6 = NULL;
15542 if (rack->r_is_v6) {
15543 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
15544 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
15548 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
15549 hdrlen = sizeof(struct tcpiphdr);
15551 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
15555 /* It's a TLP; add the flag, it may already be there but be sure */
15556 rsm->r_flags |= RACK_TLP;
15558 /* If it was a TLP it is not one on this retransmit */
15559 rsm->r_flags &= ~RACK_TLP;
15561 startseq = rsm->r_start;
15562 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
15563 inp = rack->rc_inp;
15565 flags = tcp_outflags[tp->t_state];
15566 if (flags & (TH_SYN|TH_RST)) {
15569 if (rsm->r_flags & RACK_HAS_FIN) {
15570 /* We can't send a FIN here */
15573 if (flags & TH_FIN) {
15574 /* We never send a FIN */
15577 if (tp->t_flags & TF_RCVD_TSTMP) {
15578 to.to_tsval = ms_cts + tp->ts_offset;
15579 to.to_tsecr = tp->ts_recent;
15580 to.to_flags = TOF_TS;
15582 optlen = tcp_addoptions(&to, opt);
15584 udp = rack->r_ctl.fsb.udp;
15586 hdrlen += sizeof(struct udphdr);
15587 if (rack->r_ctl.rc_pace_max_segs)
15588 max_val = rack->r_ctl.rc_pace_max_segs;
15589 else if (rack->rc_user_set_max_segs)
15590 max_val = rack->rc_user_set_max_segs * segsiz;
15593 if ((tp->t_flags & TF_TSO) &&
15599 if (MHLEN < hdrlen + max_linkhdr)
15600 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
15603 m = m_gethdr(M_NOWAIT, MT_DATA);
15606 m->m_data += max_linkhdr;
15608 th = rack->r_ctl.fsb.th;
15609 /* Establish the len to send */
15612 if ((tso) && (len + optlen > tp->t_maxseg)) {
15613 uint32_t if_hw_tsomax;
15616 /* extract TSO information */
15617 if_hw_tsomax = tp->t_tsomax;
15618 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
15619 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
15621 * Check if we should limit by maximum payload
15624 if (if_hw_tsomax != 0) {
15625 /* compute maximum TSO length */
15626 max_len = (if_hw_tsomax - hdrlen -
15628 if (max_len <= 0) {
15630 } else if (len > max_len) {
15634 if (len <= segsiz) {
15636 * In case there are too many small fragments don't
15644 if ((tso == 0) && (len > segsiz))
15647 (len <= MHLEN - hdrlen - max_linkhdr)) {
15650 th->th_seq = htonl(rsm->r_start);
15651 th->th_ack = htonl(tp->rcv_nxt);
15653 * The PUSH bit should only be applied
15654 * if the full retransmission is made. If
15655 * we are sending less than that, this is the
15656 * left hand edge and it should not have
15659 if ((rsm->r_flags & RACK_HAD_PUSH) &&
15660 (len == (rsm->r_end - rsm->r_start)))
15662 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale));
15663 if (th->th_win == 0) {
15664 tp->t_sndzerowin++;
15665 tp->t_flags |= TF_RXWIN0SENT;
15667 tp->t_flags &= ~TF_RXWIN0SENT;
15668 if (rsm->r_flags & RACK_TLP) {
15670 * TLP should not count in retran count, but
15673 counter_u64_add(rack_tlp_retran, 1);
15674 counter_u64_add(rack_tlp_retran_bytes, len);
15676 tp->t_sndrexmitpack++;
15677 KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
15678 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
15681 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
15684 if (rsm->m == NULL)
15686 if (rsm->orig_m_len != rsm->m->m_len) {
15687 /* Fix up the orig_m_len and possibly the mbuf offset */
15688 rack_adjust_orig_mlen(rsm);
15690 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls);
15691 if (len <= segsiz) {
15693 * Must have run out of mbufs for the copy;
15694 * shorten it to no longer need tso. Let's
15695 * not put on sendalot since we are low on
15700 if ((m->m_next == NULL) || (len <= 0)){
15705 ulen = hdrlen + len - sizeof(struct ip6_hdr);
15707 ulen = hdrlen + len - sizeof(struct ip);
15708 udp->uh_ulen = htons(ulen);
15710 m->m_pkthdr.rcvif = (struct ifnet *)0;
15711 if (TCPS_HAVERCVDSYN(tp->t_state) &&
15712 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
15713 int ect = tcp_ecn_output_established(tp, &flags, len, true);
15714 if ((tp->t_state == TCPS_SYN_RECEIVED) &&
15715 (tp->t_flags2 & TF2_ECN_SND_ECE))
15716 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
15718 if (rack->r_is_v6) {
15719 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
15720 ip6->ip6_flow |= htonl(ect << 20);
15725 ip->ip_tos &= ~IPTOS_ECN_MASK;
15729 tcp_set_flags(th, flags);
15730 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
15732 if (rack->r_is_v6) {
15734 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
15735 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
15736 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
15737 th->th_sum = htons(0);
15738 UDPSTAT_INC(udps_opackets);
15740 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
15741 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
15742 th->th_sum = in6_cksum_pseudo(ip6,
15743 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
15748 #if defined(INET6) && defined(INET)
15754 m->m_pkthdr.csum_flags = CSUM_UDP;
15755 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
15756 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
15757 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
15758 th->th_sum = htons(0);
15759 UDPSTAT_INC(udps_opackets);
15761 m->m_pkthdr.csum_flags = CSUM_TCP;
15762 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
15763 th->th_sum = in_pseudo(ip->ip_src.s_addr,
15764 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
15765 IPPROTO_TCP + len + optlen));
15767 /* IP version must be set here for ipv4/ipv6 checking later */
15768 KASSERT(ip->ip_v == IPVERSION,
15769 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
15773 KASSERT(len > tp->t_maxseg - optlen,
15774 ("%s: len <= tso_segsz tp:%p", __func__, tp));
15775 m->m_pkthdr.csum_flags |= CSUM_TSO;
15776 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
15779 if (rack->r_is_v6) {
15780 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit;
15781 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
15782 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
15783 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
15785 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
15788 #if defined(INET) && defined(INET6)
15793 ip->ip_len = htons(m->m_pkthdr.len);
15794 ip->ip_ttl = rack->r_ctl.fsb.hoplimit;
15795 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
15796 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
15797 if (tp->t_port == 0 || len < V_tcp_minmss) {
15798 ip->ip_off |= htons(IP_DF);
15801 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
15805 /* Time to copy in our header */
15806 cpto = mtod(m, uint8_t *);
15807 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
15808 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
15810 bcopy(opt, th + 1, optlen);
15811 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
15813 th->th_off = sizeof(struct tcphdr) >> 2;
15815 if (tcp_bblogging_on(rack->rc_tp)) {
15816 union tcp_log_stackspecific log;
15818 if (rsm->r_flags & RACK_RWND_COLLAPSED) {
15819 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm);
15820 counter_u64_add(rack_collapsed_win_rxt, 1);
15821 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start));
15823 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
15824 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
15825 if (rack->rack_no_prr)
15826 log.u_bbr.flex1 = 0;
15828 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
15829 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
15830 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
15831 log.u_bbr.flex4 = max_val;
15832 log.u_bbr.flex5 = 0;
15833 /* Save off the early/late values */
15834 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
15835 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
15836 log.u_bbr.bw_inuse = rack_get_bw(rack);
15837 if (doing_tlp == 0)
15838 log.u_bbr.flex8 = 1;
15840 log.u_bbr.flex8 = 2;
15841 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
15842 log.u_bbr.flex7 = 55;
15843 log.u_bbr.pkts_out = tp->t_maxseg;
15844 log.u_bbr.timeStamp = cts;
15845 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
15846 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use;
15847 log.u_bbr.delivered = 0;
15848 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
15849 len, &log, false, NULL, NULL, 0, tv);
15853 if (rack->r_is_v6) {
15854 error = ip6_output(m, NULL,
15856 0, NULL, NULL, inp);
15862 error = ip_output(m, NULL,
15869 lgb->tlb_errno = error;
15875 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv),
15876 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls);
15877 if (doing_tlp && (rack->fast_rsm_hack == 0)) {
15878 rack->rc_tlp_in_progress = 1;
15879 rack->r_ctl.rc_tlp_cnt_out++;
15882 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls);
15884 rack->rc_last_sent_tlp_past_cumack = 0;
15885 rack->rc_last_sent_tlp_seq_valid = 1;
15886 rack->r_ctl.last_sent_tlp_seq = rsm->r_start;
15887 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start;
15890 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
15891 rack->forced_ack = 0; /* If we send something zap the FA flag */
15892 if (IN_FASTRECOVERY(tp->t_flags) && rsm)
15893 rack->r_ctl.retran_during_recovery += len;
15897 idx = (len / segsiz) + 3;
15898 if (idx >= TCP_MSS_ACCT_ATIMER)
15899 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
15901 counter_u64_add(rack_out_size[idx], 1);
15903 if (tp->t_rtttime == 0) {
15904 tp->t_rtttime = ticks;
15905 tp->t_rtseq = startseq;
15906 KMOD_TCPSTAT_INC(tcps_segstimed);
15908 counter_u64_add(rack_fto_rsm_send, 1);
15909 if (error && (error == ENOBUFS)) {
15910 if (rack->r_ctl.crte != NULL) {
15911 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF);
15913 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF);
15914 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
15915 if (rack->rc_enobuf < 0x7f)
15917 if (slot < (10 * HPTS_USEC_IN_MSEC))
15918 slot = 10 * HPTS_USEC_IN_MSEC;
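/*
 * The backoff above grows linearly with consecutive ENOBUFS:
 * e.g. after 3 prior occurrences slot = 4 msec, which is then
 * raised to the 10 msec floor; after 15 it is 16 msec and the
 * floor no longer applies.
 */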
15920 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz);
15922 (rack->rc_always_pace == 0) ||
15923 (rack->r_rr_config == 1)) {
15925 * We have no pacing set or we
15926 * are using old-style rack or
15927 * we are overridden to use the old 1ms pacing.
15929 slot = rack->r_ctl.rc_min_to;
15931 rack_start_hpts_timer(rack, tp, cts, slot, len, 0);
15932 #ifdef TCP_ACCOUNTING
15933 crtsc = get_cyclecount();
15934 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15935 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
15937 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15938 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
15940 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15941 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz);
15953 rack_sndbuf_autoscale(struct tcp_rack *rack)
15956 * Automatic sizing of send socket buffer. Often the send buffer
15957 * size is not optimally adjusted to the actual network conditions
15958 * at hand (delay bandwidth product). Setting the buffer size too
15959 * small limits throughput on links with high bandwidth and high
15960 * delay (e.g. trans-continental/oceanic links). Setting the
15961 * buffer size too big consumes too much real kernel memory,
15962 * especially with many connections on busy servers.
15964 * The criteria to step up the send buffer one notch are:
15965 * 1. receive window of remote host is larger than send buffer
15966 * (with a fudge factor of 5/4th);
15967 * 2. send buffer is filled to 7/8th with data (so we actually
15968 * have data to make use of it);
15969 * 3. send buffer fill has not hit maximal automatic size;
15970 * 4. our send window (slow start and congestion controlled) is
15971 * larger than sent but unacknowledged data in send buffer.
15973 * Note that the rack version moves things much faster since
15974 * we want to avoid hitting cache lines in the rack_fast_output()
15975 * path so this is called much less often and thus moves
15976 * the SB forward by a percentage.
15980 uint32_t sendwin, scaleup;
15983 so = rack->rc_inp->inp_socket;
15984 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd);
15985 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
15986 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
15987 sbused(&so->so_snd) >=
15988 (so->so_snd.sb_hiwat / 8 * 7) &&
15989 sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
15990 sendwin >= (sbused(&so->so_snd) -
15991 (tp->snd_nxt - tp->snd_una))) {
15992 if (rack_autosndbuf_inc)
15993 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100;
15995 scaleup = V_tcp_autosndbuf_inc;
15996 if (scaleup < V_tcp_autosndbuf_inc)
15997 scaleup = V_tcp_autosndbuf_inc;
15998 scaleup += so->so_snd.sb_hiwat;
15999 if (scaleup > V_tcp_autosndbuf_max)
16000 scaleup = V_tcp_autosndbuf_max;
16001 if (!sbreserve_locked(so, SO_SND, scaleup, curthread))
16002 so->so_snd.sb_flags &= ~SB_AUTOSIZE;
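/*
 * Example of the step-up above: with sb_hiwat = 65536 bytes and
 * rack_autosndbuf_inc set to, say, 20 (percent), scaleup becomes
 * 13107 + 65536 = 78643 bytes (assuming 13107 already exceeds
 * V_tcp_autosndbuf_inc), subject to the V_tcp_autosndbuf_max cap
 * before sbreserve_locked() is attempted.
 */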
16008 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val,
16009 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err)
16012 * Enter to do fast output. We are given that the sched_pin is
16013 * in place (if accounting is compiled in) and the cycle count taken
16014 * at entry is in place in ts_val. The idea here is that
16015 * we know how many more bytes need to be sent (presumably either
16016 * during pacing or to fill the cwnd and that was greater than
16017 * the max-burst). We have how much to send and all the info we
16018 * need to just send.
16021 struct ip *ip = NULL;
16023 struct udphdr *udp = NULL;
16024 struct tcphdr *th = NULL;
16025 struct mbuf *m, *s_mb;
16028 struct tcp_log_buffer *lgb;
16029 #ifdef TCP_ACCOUNTING
16033 u_char opt[TCP_MAXOLEN];
16034 uint32_t hdrlen, optlen;
16035 #ifdef TCP_ACCOUNTING
16038 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0;
16041 uint32_t if_hw_tsomaxsegcount = 0, startseq;
16042 uint32_t if_hw_tsomaxsegsize;
16043 uint16_t add_flag = RACK_SENT_FP;
16045 struct ip6_hdr *ip6 = NULL;
16047 if (rack->r_is_v6) {
16048 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
16049 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
16054 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
16055 hdrlen = sizeof(struct tcpiphdr);
16058 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
16062 startseq = tp->snd_max;
16063 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
16064 inp = rack->rc_inp;
16065 len = rack->r_ctl.fsb.left_to_send;
16067 flags = rack->r_ctl.fsb.tcp_flags;
16068 if (tp->t_flags & TF_RCVD_TSTMP) {
16069 to.to_tsval = ms_cts + tp->ts_offset;
16070 to.to_tsecr = tp->ts_recent;
16071 to.to_flags = TOF_TS;
16073 optlen = tcp_addoptions(&to, opt);
16075 udp = rack->r_ctl.fsb.udp;
16077 hdrlen += sizeof(struct udphdr);
16078 if (rack->r_ctl.rc_pace_max_segs)
16079 max_val = rack->r_ctl.rc_pace_max_segs;
16080 else if (rack->rc_user_set_max_segs)
16081 max_val = rack->rc_user_set_max_segs * segsiz;
16084 if ((tp->t_flags & TF_TSO) &&
16091 if (MHLEN < hdrlen + max_linkhdr)
16092 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
16095 m = m_gethdr(M_NOWAIT, MT_DATA);
16098 m->m_data += max_linkhdr;
16100 th = rack->r_ctl.fsb.th;
16101 /* Establish the len to send */
16104 if ((tso) && (len + optlen > tp->t_maxseg)) {
16105 uint32_t if_hw_tsomax;
16108 /* extract TSO information */
16109 if_hw_tsomax = tp->t_tsomax;
16110 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
16111 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
16113 * Check if we should limit by maximum payload
16116 if (if_hw_tsomax != 0) {
16117 /* compute maximum TSO length */
16118 max_len = (if_hw_tsomax - hdrlen -
16120 if (max_len <= 0) {
16122 } else if (len > max_len) {
16126 if (len <= segsiz) {
16128 * In case there are too many small fragments don't
16136 if ((tso == 0) && (len > segsiz))
16139 (len <= MHLEN - hdrlen - max_linkhdr)) {
16142 sb_offset = tp->snd_max - tp->snd_una;
16143 th->th_seq = htonl(tp->snd_max);
16144 th->th_ack = htonl(tp->rcv_nxt);
16145 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale));
16146 if (th->th_win == 0) {
16147 tp->t_sndzerowin++;
16148 tp->t_flags |= TF_RXWIN0SENT;
16150 tp->t_flags &= ~TF_RXWIN0SENT;
16151 tp->snd_up = tp->snd_una; /* drag it along, it's deprecated */
16152 KMOD_TCPSTAT_INC(tcps_sndpack);
16153 KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
16155 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
16158 if (rack->r_ctl.fsb.m == NULL)
16161 /* s_mb and s_soff are saved for rack_log_output */
16162 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize,
16164 if (len <= segsiz) {
16166 * Must have run out of mbufs for the copy;
16167 * shorten it to no longer need tso. Let's
16168 * not put on sendalot since we are low on
16173 if (rack->r_ctl.fsb.rfo_apply_push &&
16174 (len == rack->r_ctl.fsb.left_to_send)) {
16176 add_flag |= RACK_HAD_PUSH;
16178 if ((m->m_next == NULL) || (len <= 0)){
16183 ulen = hdrlen + len - sizeof(struct ip6_hdr);
16185 ulen = hdrlen + len - sizeof(struct ip);
16186 udp->uh_ulen = htons(ulen);
16188 m->m_pkthdr.rcvif = (struct ifnet *)0;
16189 if (TCPS_HAVERCVDSYN(tp->t_state) &&
16190 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
16191 int ect = tcp_ecn_output_established(tp, &flags, len, false);
16192 if ((tp->t_state == TCPS_SYN_RECEIVED) &&
16193 (tp->t_flags2 & TF2_ECN_SND_ECE))
16194 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
16196 if (rack->r_is_v6) {
16197 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
16198 ip6->ip6_flow |= htonl(ect << 20);
16204 ip->ip_tos &= ~IPTOS_ECN_MASK;
16209 tcp_set_flags(th, flags);
16210 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
16212 if (rack->r_is_v6) {
16214 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
16215 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
16216 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
16217 th->th_sum = htons(0);
16218 UDPSTAT_INC(udps_opackets);
16220 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
16221 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
16222 th->th_sum = in6_cksum_pseudo(ip6,
16223 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
16228 #if defined(INET6) && defined(INET)
16234 m->m_pkthdr.csum_flags = CSUM_UDP;
16235 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
16236 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
16237 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
16238 th->th_sum = htons(0);
16239 UDPSTAT_INC(udps_opackets);
16241 m->m_pkthdr.csum_flags = CSUM_TCP;
16242 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
16243 th->th_sum = in_pseudo(ip->ip_src.s_addr,
16244 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
16245 IPPROTO_TCP + len + optlen));
16247 /* IP version must be set here for ipv4/ipv6 checking later */
16248 KASSERT(ip->ip_v == IPVERSION,
16249 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
16253 KASSERT(len > tp->t_maxseg - optlen,
16254 ("%s: len <= tso_segsz tp:%p", __func__, tp));
16255 m->m_pkthdr.csum_flags |= CSUM_TSO;
16256 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
16259 if (rack->r_is_v6) {
16260 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit;
16261 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
16262 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
16263 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
16265 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
16268 #if defined(INET) && defined(INET6)
16273 ip->ip_len = htons(m->m_pkthdr.len);
16274 ip->ip_ttl = rack->r_ctl.fsb.hoplimit;
16275 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
16276 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
16277 if (tp->t_port == 0 || len < V_tcp_minmss) {
16278 ip->ip_off |= htons(IP_DF);
16281 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
16285 /* Time to copy in our header */
16286 cpto = mtod(m, uint8_t *);
16287 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
16288 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
16290 bcopy(opt, th + 1, optlen);
16291 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
16293 th->th_off = sizeof(struct tcphdr) >> 2;
16295 if (tcp_bblogging_on(rack->rc_tp)) {
16296 union tcp_log_stackspecific log;
16298 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
16299 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
16300 if (rack->rack_no_prr)
16301 log.u_bbr.flex1 = 0;
16303 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
16304 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
16305 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
16306 log.u_bbr.flex4 = max_val;
16307 log.u_bbr.flex5 = 0;
16308 /* Save off the early/late values */
16309 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
16310 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
16311 log.u_bbr.bw_inuse = rack_get_bw(rack);
16312 log.u_bbr.flex8 = 0;
16313 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
16314 log.u_bbr.flex7 = 44;
16315 log.u_bbr.pkts_out = tp->t_maxseg;
16316 log.u_bbr.timeStamp = cts;
16317 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
16318 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use;
16319 log.u_bbr.delivered = 0;
16320 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
16321 len, &log, false, NULL, NULL, 0, tv);
16325 if (rack->r_is_v6) {
16326 error = ip6_output(m, NULL,
16328 0, NULL, NULL, inp);
16331 #if defined(INET) && defined(INET6)
16336 error = ip_output(m, NULL,
16342 lgb->tlb_errno = error;
16350 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv),
16351 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls);
16353 if (tp->snd_una == tp->snd_max) {
16354 rack->r_ctl.rc_tlp_rxt_last_time = cts;
16355 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
16356 tp->t_acktime = ticks;
16359 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls);
16361 rack->forced_ack = 0; /* If we send something zap the FA flag */
16363 if ((tp->t_flags & TF_GPUTINPROG) == 0)
16364 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset);
16365 tp->snd_max += len;
16366 tp->snd_nxt = tp->snd_max;
16370 idx = (len / segsiz) + 3;
16371 if (idx >= TCP_MSS_ACCT_ATIMER)
16372 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
16374 counter_u64_add(rack_out_size[idx], 1);
16376 if (len <= rack->r_ctl.fsb.left_to_send)
16377 rack->r_ctl.fsb.left_to_send -= len;
16379 rack->r_ctl.fsb.left_to_send = 0;
16380 if (rack->r_ctl.fsb.left_to_send < segsiz) {
16381 rack->r_fast_output = 0;
16382 rack->r_ctl.fsb.left_to_send = 0;
16383 /* At the end of fast_output scale up the sb */
16384 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd);
16385 rack_sndbuf_autoscale(rack);
16386 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd);
16388 if (tp->t_rtttime == 0) {
16389 tp->t_rtttime = ticks;
16390 tp->t_rtseq = startseq;
16391 KMOD_TCPSTAT_INC(tcps_segstimed);
16393 if ((rack->r_ctl.fsb.left_to_send >= segsiz) &&
16398 th = rack->r_ctl.fsb.th;
16399 #ifdef TCP_ACCOUNTING
16404 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
16405 counter_u64_add(rack_fto_send, 1);
16406 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz);
16407 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0);
16408 #ifdef TCP_ACCOUNTING
16409 crtsc = get_cyclecount();
16410 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16411 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
16413 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16414 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
16416 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16417 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz);
16425 rack->r_fast_output = 0;
16429 static struct rack_sendmap *
16430 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts)
16432 struct rack_sendmap *rsm = NULL;
16433 struct rack_sendmap fe;
16437 fe.r_start = rack->r_ctl.last_collapse_point;
16438 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
16439 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) {
16440 /* Nothing there, strange; turn off validity */
16441 rack->r_collapse_point_valid = 0;
16444 /* Can we send it yet? */
16445 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) {
16447 * Receiver window has not grown enough for
16448 * the segment to be put on the wire.
16452 if (rsm->r_flags & RACK_ACKED) {
16454 * It has been sacked, let's move to the
16455 * next one if possible.
16457 rack->r_ctl.last_collapse_point = rsm->r_end;
16459 if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
16460 rack->r_ctl.high_collapse_point)) {
16461 rack->r_collapse_point_valid = 0;
16466 /* Now has it been long enough ? */
16467 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts);
16468 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) {
16469 rack_log_collapse(rack, rsm->r_start,
16470 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])),
16471 thresh, __LINE__, 6, rsm->r_flags, rsm);
16474 /* Not enough time */
16475 rack_log_collapse(rack, rsm->r_start,
16476 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])),
16477 thresh, __LINE__, 7, rsm->r_flags, rsm);
16482 rack_output(struct tcpcb *tp)
16486 uint32_t sb_offset, s_moff = 0;
16487 int32_t len, error = 0;
16489 struct mbuf *m, *s_mb = NULL;
16491 uint32_t if_hw_tsomaxsegcount = 0;
16492 uint32_t if_hw_tsomaxsegsize;
16493 int32_t segsiz, minseg;
16494 long tot_len_this_send = 0;
16496 struct ip *ip = NULL;
16498 struct udphdr *udp = NULL;
16499 struct tcp_rack *rack;
16503 uint8_t wanted_cookie = 0;
16504 u_char opt[TCP_MAXOLEN];
16505 unsigned ipoptlen, optlen, hdrlen, ulen=0;
16508 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
16509 unsigned ipsec_optlen = 0;
16512 int32_t idle, sendalot;
16513 int32_t sub_from_prr = 0;
16514 volatile int32_t sack_rxmit;
16515 struct rack_sendmap *rsm = NULL;
16519 int32_t sup_rack = 0;
16520 uint32_t cts, ms_cts, delayed, early;
16521 uint16_t add_flag = RACK_SENT_SP;
16522 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */
16523 uint8_t hpts_calling, doing_tlp = 0;
16524 uint32_t cwnd_to_use, pace_max_seg;
16525 int32_t do_a_prefetch = 0;
16526 int32_t prefetch_rsm = 0;
16527 int32_t orig_len = 0;
16529 int32_t prefetch_so_done = 0;
16530 struct tcp_log_buffer *lgb;
16531 struct inpcb *inp = tptoinpcb(tp);
16532 struct sockbuf *sb;
16533 uint64_t ts_val = 0;
16534 #ifdef TCP_ACCOUNTING
16538 struct ip6_hdr *ip6 = NULL;
16541 bool hw_tls = false;
16543 NET_EPOCH_ASSERT();
16544 INP_WLOCK_ASSERT(inp);
16546 /* setup and take the cache hits here */
16547 rack = (struct tcp_rack *)tp->t_fb_ptr;
16548 #ifdef TCP_ACCOUNTING
16550 ts_val = get_cyclecount();
16552 hpts_calling = inp->inp_hpts_calls;
16554 if (tp->t_flags & TF_TOE) {
16555 #ifdef TCP_ACCOUNTING
16558 return (tcp_offload_output(tp));
16562 * For TFO connections in SYN_RECEIVED, only allow the initial
16563 * SYN|ACK and those sent by the retransmit timer.
16565 if (IS_FASTOPEN(tp->t_flags) &&
16566 (tp->t_state == TCPS_SYN_RECEIVED) &&
16567 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */
16568 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */
16569 #ifdef TCP_ACCOUNTING
16575 if (rack->r_state) {
16576 /* Use the cache line loaded if possible */
16577 isipv6 = rack->r_is_v6;
16579 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0;
16583 cts = tcp_get_usecs(&tv);
16584 ms_cts = tcp_tv_to_mssectick(&tv);
16585 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
16586 tcp_in_hpts(rack->rc_inp)) {
16588 * We are on the hpts for some timer but not hptsi output.
16589 * Remove from the hpts unconditionally.
16591 rack_timer_cancel(tp, rack, cts, __LINE__);
16593 /* Are we pacing and late? */
16594 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
16595 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) {
16596 /* We are delayed */
16597 delayed = cts - rack->r_ctl.rc_last_output_to;
16601 /* Do the timers, which may override the pacer */
16602 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
16605 retval = rack_process_timers(tp, rack, cts, hpts_calling,
16608 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1);
16609 #ifdef TCP_ACCOUNTING
16613 * If timers want tcp_drop(), then pass error out,
16614 * otherwise suppress it.
16616 return (retval < 0 ? retval : 0);
16619 if (rack->rc_in_persist) {
16620 if (tcp_in_hpts(rack->rc_inp) == 0) {
16621 /* Timer is not running */
16622 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
16624 #ifdef TCP_ACCOUNTING
16629 if ((rack->rc_ack_required == 1) &&
16630 (rack->r_timer_override == 0)){
16631 /* A timeout occurred and no ack has arrived */
16632 if (tcp_in_hpts(rack->rc_inp) == 0) {
16633 /* Timer is not running */
16634 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
16636 #ifdef TCP_ACCOUNTING
16641 if ((rack->r_timer_override) ||
16642 (rack->rc_ack_can_sendout_data) ||
16644 (tp->t_state < TCPS_ESTABLISHED)) {
16645 rack->rc_ack_can_sendout_data = 0;
16646 if (tcp_in_hpts(rack->rc_inp))
16647 tcp_hpts_remove(rack->rc_inp);
16648 } else if (tcp_in_hpts(rack->rc_inp)) {
16650 * On the hpts you can't pass even if ACKNOW is on; we will send
16651 * when the hpts fires.
16653 #ifdef TCP_ACCOUNTING
16654 crtsc = get_cyclecount();
16655 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16656 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val);
16658 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16659 tp->tcp_cnt_counters[SND_BLOCKED]++;
16663 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1);
16666 rack->rc_inp->inp_hpts_calls = 0;
16667 /* Finish out both pacing early and late accounting */
16668 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
16669 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) {
16670 early = rack->r_ctl.rc_last_output_to - cts;
16674 rack->r_ctl.rc_agg_delayed += delayed;
16676 } else if (early) {
16677 rack->r_ctl.rc_agg_early += early;
16680 /* Now that early/late accounting is done turn off the flag */
16681 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
16682 rack->r_wanted_output = 0;
16683 rack->r_timer_override = 0;
16684 if ((tp->t_state != rack->r_state) &&
16685 TCPS_HAVEESTABLISHED(tp->t_state)) {
16686 rack_set_state(tp, rack);
16688 if ((rack->r_fast_output) &&
16689 (doing_tlp == 0) &&
16690 (tp->rcv_numsacks == 0)) {
16694 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
16698 inp = rack->rc_inp;
16699 so = inp->inp_socket;
16704 inp = rack->rc_inp;
16706 * For TFO connections in SYN_SENT or SYN_RECEIVED,
16707 * only allow the initial SYN or SYN|ACK and those sent
16708 * by the retransmit timer.
16710 if (IS_FASTOPEN(tp->t_flags) &&
16711 ((tp->t_state == TCPS_SYN_RECEIVED) ||
16712 (tp->t_state == TCPS_SYN_SENT)) &&
16713 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */
16714 (tp->t_rxtshift == 0)) { /* not a retransmit */
16715 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
16716 so = inp->inp_socket;
16718 goto just_return_nolock;
16721 * Determine length of data that should be transmitted, and flags
16722 * that will be used. If there is some data or critical controls
16723 * (SYN, RST) to send, then transmit; otherwise, investigate
16726 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
16727 if (tp->t_idle_reduce) {
16728 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur))
16729 rack_cc_after_idle(rack, tp);
16731 tp->t_flags &= ~TF_LASTIDLE;
16733 if (tp->t_flags & TF_MORETOCOME) {
16734 tp->t_flags |= TF_LASTIDLE;
16738 if ((tp->snd_una == tp->snd_max) &&
16739 rack->r_ctl.rc_went_idle_time &&
16740 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) {
16741 idle = cts - rack->r_ctl.rc_went_idle_time;
16742 if (idle > rack_min_probertt_hold) {
16743 /* Count as a probe rtt */
16744 if (rack->in_probe_rtt == 0) {
16745 rack->r_ctl.rc_lower_rtt_us_cts = cts;
16746 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
16747 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
16748 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
16750 rack_exit_probertt(rack, cts);
16755 if (rack_use_fsb && (rack->r_fsb_inited == 0) && (rack->r_state != TCPS_CLOSED))
16756 rack_init_fsb_block(tp, rack);
16759 * If we've recently taken a timeout, snd_max will be greater than
16760 * snd_nxt. There may be SACK information that allows us to avoid
16761 * resending already delivered data. Adjust snd_nxt accordingly.
16764 cts = tcp_get_usecs(&tv);
16765 ms_cts = tcp_tv_to_mssectick(&tv);
16768 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
16770 if (rack->r_ctl.rc_pace_max_segs == 0)
16771 pace_max_seg = rack->rc_user_set_max_segs * segsiz;
16773 pace_max_seg = rack->r_ctl.rc_pace_max_segs;
16774 sb_offset = tp->snd_max - tp->snd_una;
16775 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
16776 flags = tcp_outflags[tp->t_state];
16777 while (rack->rc_free_cnt < rack_free_cache) {
16778 rsm = rack_alloc(rack);
16780 if (inp->inp_hpts_calls)
16781 /* Retry in a ms */
16782 slot = (1 * HPTS_USEC_IN_MSEC);
16783 so = inp->inp_socket;
16785 goto just_return_nolock;
16787 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext);
16788 rack->rc_free_cnt++;
16791 if (inp->inp_hpts_calls)
16792 inp->inp_hpts_calls = 0;
16796 if (flags & TH_RST) {
16797 SOCKBUF_LOCK(&inp->inp_socket->so_snd);
16798 so = inp->inp_socket;
16802 if (rack->r_ctl.rc_resend) {
16803 /* Retransmit timer */
16804 rsm = rack->r_ctl.rc_resend;
16805 rack->r_ctl.rc_resend = NULL;
16806 len = rsm->r_end - rsm->r_start;
16809 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
16810 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
16811 __func__, __LINE__,
16812 rsm->r_start, tp->snd_una, tp, rack, rsm));
16813 sb_offset = rsm->r_start - tp->snd_una;
16816 } else if (rack->r_collapse_point_valid &&
16817 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) {
16819 * If an RSM is returned then enough time has passed
16820 * for us to retransmit it. Move up the collapse point,
16821 * since this rsm has its chance to retransmit now.
16823 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT);
16824 rack->r_ctl.last_collapse_point = rsm->r_end;
16826 if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
16827 rack->r_ctl.high_collapse_point))
16828 rack->r_collapse_point_valid = 0;
16830 /* We are not doing a TLP */
16832 len = rsm->r_end - rsm->r_start;
16833 sb_offset = rsm->r_start - tp->snd_una;
16835 if ((rack->full_size_rxt == 0) &&
16836 (rack->shape_rxt_to_pacing_min == 0) &&
16839 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) {
16840 /* We have a retransmit that takes precedence */
16841 if ((!IN_FASTRECOVERY(tp->t_flags)) &&
16842 ((rsm->r_flags & RACK_MUST_RXT) == 0) &&
16843 ((tp->t_flags & TF_WASFRECOVERY) == 0)) {
16844 /* Enter recovery if not induced by a time-out */
16845 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
16848 if (SEQ_LT(rsm->r_start, tp->snd_una)) {
16849 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
16850 tp, rack, rsm, rsm->r_start, tp->snd_una);
16853 len = rsm->r_end - rsm->r_start;
16854 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
16855 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
16856 __func__, __LINE__,
16857 rsm->r_start, tp->snd_una, tp, rack, rsm));
16858 sb_offset = rsm->r_start - tp->snd_una;
16864 KMOD_TCPSTAT_INC(tcps_sack_rexmits);
16865 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
16868 } else if (rack->r_ctl.rc_tlpsend) {
16869 /* Tail loss probe */
16874 * Check if we can do a TLP with a RACK'd packet;
16875 * this can happen if we are not doing the rack
16876 * cheat and we skipped to a TLP and it
16879 rsm = rack->r_ctl.rc_tlpsend;
16880 /* We are doing a TLP, make sure the flag is present */
16881 rsm->r_flags |= RACK_TLP;
16882 rack->r_ctl.rc_tlpsend = NULL;
16884 tlen = rsm->r_end - rsm->r_start;
16887 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
16888 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
16889 __func__, __LINE__,
16890 rsm->r_start, tp->snd_una, tp, rack, rsm));
16891 sb_offset = rsm->r_start - tp->snd_una;
16892 cwin = min(tp->snd_wnd, tlen);
16895 if (rack->r_must_retran &&
16896 (doing_tlp == 0) &&
16897 (SEQ_GT(tp->snd_max, tp->snd_una)) &&
16900 * There are two different ways that we
16901 * can get into this block:
16902 * a) This is a non-sack connection, we had a time-out
16903 * and thus r_must_retran was set and everything
16904 * left outstanding has been marked for retransmit.
16905 * b) The MTU of the path shrank, so that everything
16906 * was marked to be retransmitted with the smaller
16907 * mtu and r_must_retran was set.
16909 * This means that we expect the sendmap (outstanding)
16910 * to all be marked must. We can use the tmap to
16914 int sendwin, flight;
16916 sendwin = min(tp->snd_wnd, tp->snd_cwnd);
16917 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto);
16918 if (flight >= sendwin) {
16920 * We can't send yet.
16922 so = inp->inp_socket;
16924 goto just_return_nolock;
16927 * This is the case a/b mentioned above. All
16928 * outstanding/not-acked should be marked.
16929 * We can use the tmap to find them.
16931 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
16934 rack->r_must_retran = 0;
16935 rack->r_ctl.rc_out_at_rto = 0;
16936 so = inp->inp_socket;
16938 goto just_return_nolock;
16940 if ((rsm->r_flags & RACK_MUST_RXT) == 0) {
16942 * The first one does not have the flag, did we collapse
16943 * further up in our list?
16945 rack->r_must_retran = 0;
16946 rack->r_ctl.rc_out_at_rto = 0;
16951 len = rsm->r_end - rsm->r_start;
16952 sb_offset = rsm->r_start - tp->snd_una;
16954 if ((rack->full_size_rxt == 0) &&
16955 (rack->shape_rxt_to_pacing_min == 0) &&
16959 * Delay removing the flag RACK_MUST_RXT so
16960 * that the fastpath for retransmit will
16961 * work with this rsm.
16966 * Enforce a connection sendmap count limit if set
16967 * as long as we are not retransmitting.
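 * (When the limit is hit we simply stop emitting new data until ACKs
 * free up sendmap entries, and account for the event below.)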
16969 if ((rsm == NULL) &&
16970 (rack->do_detection == 0) &&
16971 (V_tcp_map_entries_limit > 0) &&
16972 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
16973 counter_u64_add(rack_to_alloc_limited, 1);
16974 if (!rack->alloc_limit_reported) {
16975 rack->alloc_limit_reported = 1;
16976 counter_u64_add(rack_alloc_limited_conns, 1);
16978 so = inp->inp_socket;
16980 goto just_return_nolock;
16982 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) {
16983 /* we are retransmitting the fin */
16987 * When retransmitting data do *not* include the
16988 * FIN. This could happen from a TLP probe.
16993 if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo &&
16994 ((rsm->r_flags & RACK_HAS_FIN) == 0)) {
16997 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp);
17001 so = inp->inp_socket;
17003 if (do_a_prefetch == 0) {
17004 kern_prefetch(sb, &do_a_prefetch);
17007 #ifdef NETFLIX_SHARED_CWND
17008 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) &&
17009 rack->rack_enable_scwnd) {
17010 /* We are doing cwnd sharing */
17011 if (rack->gp_ready &&
17012 (rack->rack_attempted_scwnd == 0) &&
17013 (rack->r_ctl.rc_scw == NULL) &&
17015 /* The pcbid is in, let's make an attempt */
17016 counter_u64_add(rack_try_scwnd, 1);
17017 rack->rack_attempted_scwnd = 1;
17018 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp,
17019 &rack->r_ctl.rc_scw_index,
17022 if (rack->r_ctl.rc_scw &&
17023 (rack->rack_scwnd_is_idle == 1) &&
17024 sbavail(&so->so_snd)) {
17025 /* we are no longer out of data */
17026 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
17027 rack->rack_scwnd_is_idle = 0;
17029 if (rack->r_ctl.rc_scw) {
17030 /* First let's update and get the cwnd */
17031 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw,
17032 rack->r_ctl.rc_scw_index,
17033 tp->snd_cwnd, tp->snd_wnd, segsiz);
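/*
 * With a shared cwnd in play, cwnd_to_use now reflects this connection's
 * share of the group's congestion window rather than tp->snd_cwnd alone.
 */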
17038 * Get standard flags, and add SYN or FIN if requested by 'hidden'
17041 if (tp->t_flags & TF_NEEDFIN)
17043 if (tp->t_flags & TF_NEEDSYN)
17045 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) {
17047 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
17049 kern_prefetch(end_rsm, &prefetch_rsm);
17054 * If snd_nxt == snd_max and we have transmitted a FIN, the
17055 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a
17056 * negative length. This can also occur when TCP opens up its
17057 * congestion window while receiving additional duplicate acks after
17058 * fast-retransmit because TCP will reset snd_nxt to snd_max after
17059 * the fast-retransmit.
17061 * In the normal retransmit-FIN-only case, however, snd_nxt will be
17062 * set to snd_una, the sb_offset will be 0, and the length may wind
17065 * If sack_rxmit is true we are retransmitting from the scoreboard
17066 * in which case len is already set.
17068 if ((sack_rxmit == 0) &&
17069 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) {
17072 avail = sbavail(sb);
17073 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail)
17074 sb_offset = tp->snd_nxt - tp->snd_una;
17077 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) {
17078 if (rack->r_ctl.rc_tlp_new_data) {
17079 /* TLP is forcing out new data */
17080 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) {
17081 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset);
17083 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) {
17084 if (tp->snd_wnd > sb_offset)
17085 len = tp->snd_wnd - sb_offset;
17089 len = rack->r_ctl.rc_tlp_new_data;
17091 rack->r_ctl.rc_tlp_new_data = 0;
17093 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset);
17095 if ((rack->r_ctl.crte == NULL) && IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) {
17097 * For prr=off, we need to send only 1 MSS
17098 * at a time. We do this because another sack could
17099 * be arriving that causes us to send retransmits and
17100 * we don't want to be on a long pace due to a larger send
17101 * that keeps us from sending out the retransmit.
17106 uint32_t outstanding;
17108 * We are inside of a Fast recovery episode, this
17109 * is caused by a SACK or 3 dup acks. At this point
17110 * we have sent all the retransmissions and we rely
17111 * on PRR to dictate what we will send in the form of
17115 outstanding = tp->snd_max - tp->snd_una;
17116 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) {
17117 if (tp->snd_wnd > outstanding) {
17118 len = tp->snd_wnd - outstanding;
17119 /* Check to see if we have the data */
17120 if ((sb_offset + len) > avail) {
17121 /* It does not all fit */
17122 if (avail > sb_offset)
17123 len = avail - sb_offset;
17130 } else if (avail > sb_offset) {
17131 len = avail - sb_offset;
17136 if (len > rack->r_ctl.rc_prr_sndcnt) {
17137 len = rack->r_ctl.rc_prr_sndcnt;
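/*
 * rc_prr_sndcnt is the PRR (RFC 6937) send budget; it is replenished as
 * ACKs/SACKs arrive, so new data in recovery is clocked out at the rate
 * PRR allows rather than in one burst.
 */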
17143 if (len > segsiz) {
17145 * We should never send more than a MSS when
17146 * retransmitting or sending new data in prr
17147 * mode unless the override flag is on. Most
17148 * likely the PRR algorithm is not going to
17149 * let us send a lot as well :-)
17151 if (rack->r_ctl.rc_prr_sendalot == 0) {
17154 } else if (len < segsiz) {
17156 * Do we send any? The idea here is if the
17157 * send empties the socket buffer we want to
17158 * do it. However if not then let's just wait
17159 * for our prr_sndcnt to get bigger.
17163 leftinsb = sbavail(sb) - sb_offset;
17164 if (leftinsb > len) {
17165 /* This send does not empty the sb */
17170 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) {
17172 * If you have not established
17173 * and are not doing FAST OPEN
17176 if ((sack_rxmit == 0) &&
17177 (!IS_FASTOPEN(tp->t_flags))){
17182 if (prefetch_so_done == 0) {
17183 kern_prefetch(so, &prefetch_so_done);
17184 prefetch_so_done = 1;
17187 * Lop off SYN bit if it has already been sent. However, if this is
17188 * SYN-SENT state and if segment contains data and if we don't know
17189 * that the foreign host supports TAO, suppress sending segment.
17191 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) &&
17192 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) {
17194 * When sending additional segments following a TFO SYN|ACK,
17195 * do not include the SYN bit.
17197 if (IS_FASTOPEN(tp->t_flags) &&
17198 (tp->t_state == TCPS_SYN_RECEIVED))
17202 * Be careful not to send data and/or FIN on SYN segments. This
17203 * measure is needed to prevent interoperability problems with not
17204 * fully conformant TCP implementations.
17206 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
17211 * On TFO sockets, ensure no data is sent in the following cases:
17213 * - When retransmitting SYN|ACK on a passively-created socket
17215 * - When retransmitting SYN on an actively created socket
17217 * - When sending a zero-length cookie (cookie request) on an
17218 * actively created socket
17220 * - When the socket is in the CLOSED state (RST is being sent)
17222 if (IS_FASTOPEN(tp->t_flags) &&
17223 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
17224 ((tp->t_state == TCPS_SYN_SENT) &&
17225 (tp->t_tfo_client_cookie_len == 0)) ||
17226 (flags & TH_RST))) {
17230 /* Without fast-open there should never be data sent on a SYN */
17231 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) {
17232 tp->snd_nxt = tp->iss;
17235 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) {
17236 /* We only send 1 MSS if we have a DSACK block */
17237 add_flag |= RACK_SENT_W_DSACK;
17243 * If FIN has been sent but not acked, but we haven't been
17244 * called to retransmit, len will be < 0. Otherwise, window
17245 * shrank after we sent into it. If window shrank to 0,
17246 * cancel pending retransmit, pull snd_nxt back to (closed)
17247 * window, and set the persist timer if it isn't already
17248 * going. If the window didn't close completely, just wait
17251 * We also do a general check here to ensure that we will
17252 * set the persist timer when we have data to send, but a
17253 * 0-byte window. This makes sure the persist timer is set
17254 * even if the packet hits one of the "goto send" lines
17258 if ((tp->snd_wnd == 0) &&
17259 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
17260 (tp->snd_una == tp->snd_max) &&
17261 (sb_offset < (int)sbavail(sb))) {
17262 rack_enter_persist(tp, rack, cts);
17264 } else if ((rsm == NULL) &&
17265 (doing_tlp == 0) &&
17266 (len < pace_max_seg)) {
17268 * We are not sending a maximum sized segment for
17269 * some reason. Should we not send anything (think
17270 * sws or persists)?
17272 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
17273 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
17275 (len < (int)(sbavail(sb) - sb_offset))) {
17277 * Here the rwnd is less than
17278 * the minimum pacing size, this is not a retransmit,
17279 * we are established, and
17280 * the send is not the last in the socket buffer;
17281 * we send nothing, and we may enter persists
17282 * if nothing is outstanding.
17285 if (tp->snd_max == tp->snd_una) {
17287 * Nothing is out, we can
17288 * go into persists.
17290 rack_enter_persist(tp, rack, cts);
17292 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) &&
17293 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
17294 (len < (int)(sbavail(sb) - sb_offset)) &&
17297 * Here we are not retransmitting, and
17298 * the cwnd is not so small that we could
17299 * not send at least a min size (rxt timer
17300 * not having gone off). We have 2 segments or
17301 * more already in flight, it's not the tail end
17302 * of the socket buffer and the cwnd is blocking
17303 * us from sending out a minimum pacing segment size.
17304 * Let's not send anything.
17307 } else if (((tp->snd_wnd - ctf_outstanding(tp)) <
17308 min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
17309 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
17310 (len < (int)(sbavail(sb) - sb_offset)) &&
17311 (TCPS_HAVEESTABLISHED(tp->t_state))) {
17313 * Here we have a send window but we have
17314 * filled it up and we can't send another pacing segment.
17315 * We also have in flight more than 2 segments
17316 * and we are not completing the sb, i.e. we allow
17317 * the last bytes of the sb to go out even if
17318 * it's not a full pacing segment.
17321 } else if ((rack->r_ctl.crte != NULL) &&
17322 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) &&
17323 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) &&
17324 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) &&
17325 (len < (int)(sbavail(sb) - sb_offset))) {
17327 * Here we are doing hardware pacing, this is not a TLP,
17328 * we are not sending a pace max segment size, there is rwnd
17329 * room to send at least N pace_max_seg, the cwnd is greater
17330 * than or equal to a full pacing segment plus 4 mss and we have 2 or
17331 * more segments in flight and it's not the tail of the socket buffer.
17333 * We don't want to send; instead we need to get more ACKs in to
17334 * allow us to send a full pacing segment. Normally, if we are pacing
17335 * about the right speed, we should have finished our pacing
17336 * send as most of the acks have come back if we are at the
17337 * right rate. This is a bit fuzzy since return path delay
17338 * can delay the acks, which is why we want to make sure we
17339 * have cwnd space to have a bit more than a max pace segment in flight.
17341 * If we have not gotten our ACKs back, we are pacing at too high a
17342 * rate; delaying will not hurt and will bring our GP estimate down by
17343 * injecting the delay. If we don't do this we will send
17344 * 2 MSS out in response to the acks being clocked in which
17345 * defeats the point of hw-pacing (i.e. to help us get
17346 * larger TSO's out).
17353 /* len will be >= 0 after this point. */
17354 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
17355 rack_sndbuf_autoscale(rack);
17357 * Decide if we can use TCP Segmentation Offloading (if supported by
17360 * TSO may only be used if we are in a pure bulk sending state. The
17361 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
17362 * options prevent using TSO. With TSO the TCP header is the same
17363 * (except for the sequence number) for all generated packets. This
17364 * makes it impossible to transmit any options which vary per
17365 * generated segment or packet.
17367 * IPv4 handling has a clear separation of ip options and ip header
17368 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
17369 * the right thing below to provide length of just ip options and thus
17370 * checking for ipoptlen is enough to decide if ip options are present.
17373 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
17375 * Pre-calculate here as we save another lookup into the darknesses
17376 * of IPsec that way and can actually decide if TSO is ok.
17379 if (isipv6 && IPSEC_ENABLED(ipv6))
17380 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp);
17386 if (IPSEC_ENABLED(ipv4))
17387 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp);
17391 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
17392 ipoptlen += ipsec_optlen;
17394 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz &&
17395 (tp->t_port == 0) &&
17396 ((tp->t_flags & TF_SIGNATURE) == 0) &&
17397 tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
17401 uint32_t outstanding __unused;
17403 outstanding = tp->snd_max - tp->snd_una;
17404 if (tp->t_flags & TF_SENTFIN) {
17406 * If we sent a fin, snd_max is 1 higher than
17412 if ((rsm->r_flags & RACK_HAS_FIN) == 0)
17415 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
17420 recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
17421 (long)TCP_MAXWIN << tp->rcv_scale);
17424 * Sender silly window avoidance. We transmit under the following
17425 * conditions when len is non-zero:
17427 * - We have a full segment (or more with TSO) - This is the last
17428 * buffer in a write()/send() and we are either idle or running
17429 * NODELAY - we've timed out (e.g. persist timer) - we have more
17430 * than 1/2 the maximum send window's worth of data (receiver may be
17431 * limiting the window size) - we need to retransmit
17434 if (len >= segsiz) {
17438 * NOTE! on localhost connections an 'ack' from the remote
17439 * end may occur synchronously with the output and cause us
17440 * to flush a buffer queued with moretocome. XXX
17443 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */
17444 (idle || (tp->t_flags & TF_NODELAY)) &&
17445 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
17446 (tp->t_flags & TF_NOPUSH) == 0) {
17450 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */
17454 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
17458 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */
17466 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) &&
17467 (ctf_outstanding(tp) < (segsiz * 2))) {
17469 * We have less than two MSS outstanding (delayed ack)
17470 * and our rwnd will not let us send a full sized
17471 * MSS. Lets go ahead and let this small segment
17472 * out because we want to try to have at least two
17473 * packets inflight to not be caught by delayed ack.
17480 * Sending of standalone window updates.
17482 * Window updates are important when we close our window due to a
17483 * full socket buffer and are opening it again after the application
17484 * reads data from it. Once the window has opened again and the
17485 * remote end starts to send again the ACK clock takes over and
17486 * provides the most current window information.
17488 * We must avoid the silly window syndrome whereby every read from
17489 * the receive buffer, no matter how small, causes a window update
17490 * to be sent. We also should avoid sending a flurry of window
17491 * updates when the socket buffer had queued a lot of data and the
17492 * application is doing small reads.
17494 * Prevent a flurry of pointless window updates by only sending an
17495 * update when we can increase the advertised window by more than
17496 * 1/4th of the socket buffer capacity. When the buffer is getting
17497 * full or is very small be more aggressive and send an update
17498 * whenever we can increase by two mss sized segments. In all other
17499 * situations the ACK's to new incoming data will carry further
17500 * window increases.
17502 * Don't send an independent window update if a delayed ACK is
17503 * pending (it will get piggy-backed on it) or the remote side
17504 * already has done a half-close and won't send more data. Skip
17505 * this if the connection is in T/TCP half-open state.
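 * (Illustrative numbers only: with a 64 KB receive buffer an update goes
 * out once the window can grow by more than 16 KB, or at 2 * MSS
 * granularity when the buffer is nearly full or very small.)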
17507 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
17508 !(tp->t_flags & TF_DELACK) &&
17509 !TCPS_HAVERCVDFIN(tp->t_state)) {
17511 * "adv" is the amount we could increase the window, taking
17512 * into account that we are limited by TCP_MAXWIN <<
17519 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
17520 oldwin = (tp->rcv_adv - tp->rcv_nxt);
17524 /* We can't increase the window */
17531 * If the new window size ends up being the same as or less
17532 * than the old size when it is scaled, then don't force
17535 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
17538 if (adv >= (int32_t)(2 * segsiz) &&
17539 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
17540 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) ||
17541 so->so_rcv.sb_hiwat <= 8 * segsiz)) {
17545 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) {
17553 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW
17554 * is also a catch-all for the retransmit timer timeout case.
17556 if (tp->t_flags & TF_ACKNOW) {
17560 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) {
17565 * If our state indicates that FIN should be sent and we have not
17566 * yet done so, then we need to send.
17568 if ((flags & TH_FIN) &&
17569 (tp->snd_nxt == tp->snd_una)) {
17574 * No reason to send a segment, just return.
17577 SOCKBUF_UNLOCK(sb);
17578 just_return_nolock:
17580 int app_limited = CTF_JR_SENT_DATA;
17582 if (tot_len_this_send > 0) {
17583 /* Make sure snd_nxt is up to max */
17584 rack->r_ctl.fsb.recwin = recwin;
17585 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz);
17586 if ((error == 0) &&
17588 ((flags & (TH_SYN|TH_FIN)) == 0) &&
17590 (tp->snd_nxt == tp->snd_max) &&
17591 (tp->rcv_numsacks == 0) &&
17592 rack->r_fsb_inited &&
17593 TCPS_HAVEESTABLISHED(tp->t_state) &&
17594 (rack->r_must_retran == 0) &&
17595 ((tp->t_flags & TF_NEEDFIN) == 0) &&
17596 (len > 0) && (orig_len > 0) &&
17597 (orig_len > len) &&
17598 ((orig_len - len) >= segsiz) &&
17600 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
17601 /* We can send at least one more MSS using our fsb */
17603 rack->r_fast_output = 1;
17604 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
17605 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
17606 rack->r_ctl.fsb.tcp_flags = flags;
17607 rack->r_ctl.fsb.left_to_send = orig_len - len;
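/*
 * left_to_send is how much more the fast-output path may emit from the
 * current socket buffer position before a full rack_output() pass is
 * required again.
 */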
17609 rack->r_ctl.fsb.hw_tls = 1;
17611 rack->r_ctl.fsb.hw_tls = 0;
17612 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
17613 ("rack:%p left_to_send:%u sbavail:%u out:%u",
17614 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
17615 (tp->snd_max - tp->snd_una)));
17616 if (rack->r_ctl.fsb.left_to_send < segsiz)
17617 rack->r_fast_output = 0;
17619 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
17620 rack->r_ctl.fsb.rfo_apply_push = 1;
17622 rack->r_ctl.fsb.rfo_apply_push = 0;
17625 rack->r_fast_output = 0;
17628 rack_log_fsb(rack, tp, so, flags,
17629 ipoptlen, orig_len, len, 0,
17630 1, optlen, __LINE__, 1);
17631 if (SEQ_GT(tp->snd_max, tp->snd_nxt))
17632 tp->snd_nxt = tp->snd_max;
17634 int end_window = 0;
17635 uint32_t seq = tp->gput_ack;
17637 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
17640 * Mark the last sent that we just-returned (hinting
17641 * that delayed ack may play a role in any rtt measurement).
17643 rsm->r_just_ret = 1;
17645 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1);
17646 rack->r_ctl.rc_agg_delayed = 0;
17649 rack->r_ctl.rc_agg_early = 0;
17650 if ((ctf_outstanding(tp) +
17651 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)),
17652 minseg)) >= tp->snd_wnd) {
17653 /* We are limited by the rwnd */
17654 app_limited = CTF_JR_RWND_LIMITED;
17655 if (IN_FASTRECOVERY(tp->t_flags))
17656 rack->r_ctl.rc_prr_sndcnt = 0;
17657 } else if (ctf_outstanding(tp) >= sbavail(sb)) {
17658 /* We are limited by what's available -- app limited */
17659 app_limited = CTF_JR_APP_LIMITED;
17660 if (IN_FASTRECOVERY(tp->t_flags))
17661 rack->r_ctl.rc_prr_sndcnt = 0;
17662 } else if ((idle == 0) &&
17663 ((tp->t_flags & TF_NODELAY) == 0) &&
17664 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
17667 * TF_NODELAY is not set and the
17668 * user is sending less than 1 MSS. This
17669 * brings out SWS avoidance so we
17670 * don't send. Another app-limited case.
17672 app_limited = CTF_JR_APP_LIMITED;
17673 } else if (tp->t_flags & TF_NOPUSH) {
17675 * The user has requested no push of
17676 * the last segment and we are
17677 * at the last segment. Another app
17680 app_limited = CTF_JR_APP_LIMITED;
17681 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) {
17683 app_limited = CTF_JR_CWND_LIMITED;
17684 } else if (IN_FASTRECOVERY(tp->t_flags) &&
17685 (rack->rack_no_prr == 0) &&
17686 (rack->r_ctl.rc_prr_sndcnt < segsiz)) {
17687 app_limited = CTF_JR_PRR;
17689 /* Now, why are we not sending here? */
17692 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
17695 app_limited = CTF_JR_ASSESSING;
17698 * App limited in some fashion, for our pacing GP
17699 * measurements we don't want any gap (even cwnd).
17700 * Close down the measurement window.
17702 if (rack_cwnd_block_ends_measure &&
17703 ((app_limited == CTF_JR_CWND_LIMITED) ||
17704 (app_limited == CTF_JR_PRR))) {
17706 * The reason we are not sending is
17707 * the cwnd (or prr). We have been configured
17708 * to end the measurement window in
17712 } else if (rack_rwnd_block_ends_measure &&
17713 (app_limited == CTF_JR_RWND_LIMITED)) {
17715 * We are rwnd limited and have been
17716 * configured to end the measurement
17717 * window in this case.
17720 } else if (app_limited == CTF_JR_APP_LIMITED) {
17722 * A true application limited period, we have
17726 } else if (app_limited == CTF_JR_ASSESSING) {
17728 * In the assessing case we hit the end of
17729 * the if/else and had no known reason.
17730 * This will panic us under invariants.
17732 * If we get this out in logs we need to
17733 * investigate which reason we missed.
17740 /* Adjust the Gput measurement */
17741 if ((tp->t_flags & TF_GPUTINPROG) &&
17742 SEQ_GT(tp->gput_ack, tp->snd_max)) {
17743 tp->gput_ack = tp->snd_max;
17744 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
17746 * There is not enough to measure.
17748 tp->t_flags &= ~TF_GPUTINPROG;
17749 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
17750 rack->r_ctl.rc_gp_srtt /*flex1*/,
17752 0, 0, 18, __LINE__, NULL, 0);
17756 /* Mark the last packet as app limited */
17757 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
17758 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
17759 if (rack->r_ctl.rc_app_limited_cnt == 0)
17760 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
17763 * Go out to the end app limited and mark
17764 * this new one as next and move the end_appl up
17767 if (rack->r_ctl.rc_end_appl)
17768 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
17769 rack->r_ctl.rc_end_appl = rsm;
17771 rsm->r_flags |= RACK_APP_LIMITED;
17772 rack->r_ctl.rc_app_limited_cnt++;
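/*
 * rc_first_appl/rc_end_appl chain together (via r_nseq_appl) the sendmap
 * entries that closed app-limited periods, so later goodput measurements
 * can account for intervals where the application, not the network,
 * limited what we sent.
 */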
17775 rack_log_pacing_delay_calc(rack,
17776 rack->r_ctl.rc_app_limited_cnt, seq,
17777 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0);
17780 /* Check if we need to go into persists or not */
17781 if ((tp->snd_max == tp->snd_una) &&
17782 TCPS_HAVEESTABLISHED(tp->t_state) &&
17784 (sbavail(sb) > tp->snd_wnd) &&
17785 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) {
17786 /* Yes, let's make sure to move to persist before timer-start */
17787 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
17789 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack);
17790 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use);
17792 #ifdef NETFLIX_SHARED_CWND
17793 if ((sbavail(sb) == 0) &&
17794 rack->r_ctl.rc_scw) {
17795 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
17796 rack->rack_scwnd_is_idle = 1;
17799 #ifdef TCP_ACCOUNTING
17800 if (tot_len_this_send > 0) {
17801 crtsc = get_cyclecount();
17802 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17803 tp->tcp_cnt_counters[SND_OUT_DATA]++;
17805 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17806 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
17808 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17809 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz);
17812 crtsc = get_cyclecount();
17813 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17814 tp->tcp_cnt_counters[SND_LIMITED]++;
17816 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17817 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val);
17825 if (rsm || sack_rxmit)
17826 counter_u64_add(rack_nfto_resend, 1);
17828 counter_u64_add(rack_non_fto_send, 1);
17829 if ((flags & TH_FIN) &&
17832 * We do not transmit a FIN
17833 * with data outstanding. We
17834 * need to make it so all data
17839 /* Enforce stack imposed max seg size if we have one */
17840 if (rack->r_ctl.rc_pace_max_segs &&
17841 (len > rack->r_ctl.rc_pace_max_segs)) {
17843 len = rack->r_ctl.rc_pace_max_segs;
17845 SOCKBUF_LOCK_ASSERT(sb);
17848 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
17850 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
17853 * Before ESTABLISHED, force sending of initial options unless TCP
17854 * set not to do any options. NOTE: we assume that the IP/TCP header
17855 * plus TCP options always fit in a single mbuf, leaving room for a
17856 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr)
17857 * + optlen <= MCLBYTES
17862 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
17865 hdrlen = sizeof(struct tcpiphdr);
17868 * Compute options for segment. We only have to care about SYN and
17869 * established connection segments. Options for SYN-ACK segments
17870 * are handled in TCP syncache.
17873 if ((tp->t_flags & TF_NOOPT) == 0) {
17874 /* Maximum segment size. */
17875 if (flags & TH_SYN) {
17876 tp->snd_nxt = tp->iss;
17877 to.to_mss = tcp_mssopt(&inp->inp_inc);
17879 to.to_mss -= V_tcp_udp_tunneling_overhead;
17880 to.to_flags |= TOF_MSS;
17883 * On SYN or SYN|ACK transmits on TFO connections,
17884 * only include the TFO option if it is not a
17885 * retransmit, as the presence of the TFO option may
17886 * have caused the original SYN or SYN|ACK to have
17887 * been dropped by a middlebox.
17889 if (IS_FASTOPEN(tp->t_flags) &&
17890 (tp->t_rxtshift == 0)) {
17891 if (tp->t_state == TCPS_SYN_RECEIVED) {
17892 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
17894 (u_int8_t *)&tp->t_tfo_cookie.server;
17895 to.to_flags |= TOF_FASTOPEN;
17897 } else if (tp->t_state == TCPS_SYN_SENT) {
17899 tp->t_tfo_client_cookie_len;
17901 tp->t_tfo_cookie.client;
17902 to.to_flags |= TOF_FASTOPEN;
17905 * If we wind up having more data to
17906 * send with the SYN than can fit in
17907 * one segment, don't send any more
17908 * until the SYN|ACK comes back from
17915 /* Window scaling. */
17916 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
17917 to.to_wscale = tp->request_r_scale;
17918 to.to_flags |= TOF_SCALE;
17921 if ((tp->t_flags & TF_RCVD_TSTMP) ||
17922 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
17923 to.to_tsval = ms_cts + tp->ts_offset;
17924 to.to_tsecr = tp->ts_recent;
17925 to.to_flags |= TOF_TS;
17927 /* Set receive buffer autosizing timestamp. */
17928 if (tp->rfbuf_ts == 0 &&
17929 (so->so_rcv.sb_flags & SB_AUTOSIZE))
17930 tp->rfbuf_ts = tcp_ts_getticks();
17931 /* Selective ACK's. */
17932 if (tp->t_flags & TF_SACK_PERMIT) {
17933 if (flags & TH_SYN)
17934 to.to_flags |= TOF_SACKPERM;
17935 else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
17936 tp->rcv_numsacks > 0) {
17937 to.to_flags |= TOF_SACK;
17938 to.to_nsacks = tp->rcv_numsacks;
17939 to.to_sacks = (u_char *)tp->sackblks;
17942 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
17943 /* TCP-MD5 (RFC2385). */
17944 if (tp->t_flags & TF_SIGNATURE)
17945 to.to_flags |= TOF_SIGNATURE;
17946 #endif /* TCP_SIGNATURE */
17948 /* Processing the options. */
17949 hdrlen += optlen = tcp_addoptions(&to, opt);
17951 * If we wanted a TFO option to be added, but it was unable
17952 * to fit, ensure no data is sent.
17954 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie &&
17955 !(to.to_flags & TOF_FASTOPEN))
17959 if (V_tcp_udp_tunneling_port == 0) {
17960 /* The port was removed?? */
17961 SOCKBUF_UNLOCK(&so->so_snd);
17962 #ifdef TCP_ACCOUNTING
17963 crtsc = get_cyclecount();
17964 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17965 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
17967 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17968 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
17972 return (EHOSTUNREACH);
17974 hdrlen += sizeof(struct udphdr);
17978 ipoptlen = ip6_optlen(inp);
17981 if (inp->inp_options)
17982 ipoptlen = inp->inp_options->m_len -
17983 offsetof(struct ipoption, ipopt_list);
17986 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
17987 ipoptlen += ipsec_optlen;
17991 * Adjust data length if insertion of options will bump the packet
17992 * length beyond the t_maxseg length. Clear the FIN bit because we
17993 * cut off the tail of the segment.
17995 if (len + optlen + ipoptlen > tp->t_maxseg) {
17997 uint32_t if_hw_tsomax;
18001 /* extract TSO information */
18002 if_hw_tsomax = tp->t_tsomax;
18003 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
18004 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
18005 KASSERT(ipoptlen == 0,
18006 ("%s: TSO can't do IP options", __func__));
18009 * Check if we should limit by maximum payload
18012 if (if_hw_tsomax != 0) {
18013 /* compute maximum TSO length */
18014 max_len = (if_hw_tsomax - hdrlen -
18016 if (max_len <= 0) {
18018 } else if (len > max_len) {
18025 * Prevent the last segment from being fractional
18026 * unless the send sockbuf can be emptied:
18028 max_len = (tp->t_maxseg - optlen);
18029 if ((sb_offset + len) < sbavail(sb)) {
18030 moff = len % (u_int)max_len;
18037 * In case there are too many small fragments don't
18040 if (len <= segsiz) {
18045 * Send the FIN in a separate segment after the bulk
18046 * sending is done. We don't trust the TSO
18047 * implementations to clear the FIN flag on all but
18048 * the last segment.
18050 if (tp->t_flags & TF_NEEDFIN) {
18055 if (optlen + ipoptlen >= tp->t_maxseg) {
18057 * Since we don't have enough space to put
18058 * the IP header chain and the TCP header in
18059 * one packet as required by RFC 7112, don't
18060 * send it. Also ensure that at least one
18061 * byte of the payload can be put into the
18064 SOCKBUF_UNLOCK(&so->so_snd);
18069 len = tp->t_maxseg - optlen - ipoptlen;
18076 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
18077 ("%s: len > IP_MAXPACKET", __func__));
18080 if (max_linkhdr + hdrlen > MCLBYTES)
18082 if (max_linkhdr + hdrlen > MHLEN)
18084 panic("tcphdr too big");
18088 * This KASSERT is here to catch edge cases at a well defined place.
18089 * Before, those had triggered (random) panic conditions further
18092 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
18094 (flags & TH_FIN) &&
18097 * We have outstanding data, don't send a FIN by itself!
18102 * Grab a header mbuf, attaching a copy of data to be transmitted,
18103 * and initialize the header from the template for sends on this
18106 hw_tls = tp->t_nic_ktls_xmit != 0;
18111 if (rack->r_ctl.rc_pace_max_segs)
18112 max_val = rack->r_ctl.rc_pace_max_segs;
18113 else if (rack->rc_user_set_max_segs)
18114 max_val = rack->rc_user_set_max_segs * segsiz;
18118 * We allow a limit on sending with hptsi.
18120 if (len > max_val) {
18125 if (MHLEN < hdrlen + max_linkhdr)
18126 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
18129 m = m_gethdr(M_NOWAIT, MT_DATA);
18132 SOCKBUF_UNLOCK(sb);
18137 m->m_data += max_linkhdr;
18141 * Start the m_copy functions from the closest mbuf to the
18142 * sb_offset in the socket buffer chain.
18144 mb = sbsndptr_noadv(sb, sb_offset, &moff);
18147 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) {
18148 m_copydata(mb, moff, (int)len,
18149 mtod(m, caddr_t)+hdrlen);
18150 if (SEQ_LT(tp->snd_nxt, tp->snd_max))
18151 sbsndptr_adv(sb, mb, len);
18154 struct sockbuf *msb;
18156 if (SEQ_LT(tp->snd_nxt, tp->snd_max))
18160 m->m_next = tcp_m_copym(
18162 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb,
18163 ((rsm == NULL) ? hw_tls : 0)
18164 #ifdef NETFLIX_COPY_ARGS
18168 if (len <= (tp->t_maxseg - optlen)) {
18170 * Must have run out of mbufs for the copy;
18171 * shorten it to no longer need TSO. Let's
18172 * not put on sendalot since we are low on
18177 if (m->m_next == NULL) {
18178 SOCKBUF_UNLOCK(sb);
18185 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
18186 if (rsm && (rsm->r_flags & RACK_TLP)) {
18188 * TLP should not count in retran count, but
18191 counter_u64_add(rack_tlp_retran, 1);
18192 counter_u64_add(rack_tlp_retran_bytes, len);
18194 tp->t_sndrexmitpack++;
18195 KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
18196 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
18199 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
18203 KMOD_TCPSTAT_INC(tcps_sndpack);
18204 KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
18206 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
18211 * If we're sending everything we've got, set PUSH. (This
18212 * will keep happy those implementations which only give
18213 * data to the user when a buffer fills or a PUSH comes in.)
18215 if (sb_offset + len == sbused(sb) &&
18217 !(flags & TH_SYN)) {
18219 add_flag |= RACK_HAD_PUSH;
18222 SOCKBUF_UNLOCK(sb);
18224 SOCKBUF_UNLOCK(sb);
18225 if (tp->t_flags & TF_ACKNOW)
18226 KMOD_TCPSTAT_INC(tcps_sndacks);
18227 else if (flags & (TH_SYN | TH_FIN | TH_RST))
18228 KMOD_TCPSTAT_INC(tcps_sndctrl);
18230 KMOD_TCPSTAT_INC(tcps_sndwinup);
18232 m = m_gethdr(M_NOWAIT, MT_DATA);
18239 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
18241 M_ALIGN(m, hdrlen);
18244 m->m_data += max_linkhdr;
18247 SOCKBUF_UNLOCK_ASSERT(sb);
18248 m->m_pkthdr.rcvif = (struct ifnet *)0;
18250 mac_inpcb_create_mbuf(inp, m);
18252 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
18255 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
18259 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
18261 th = rack->r_ctl.fsb.th;
18262 udp = rack->r_ctl.fsb.udp;
18266 ulen = hdrlen + len - sizeof(struct ip6_hdr);
18269 ulen = hdrlen + len - sizeof(struct ip);
18270 udp->uh_ulen = htons(ulen);
18275 ip6 = mtod(m, struct ip6_hdr *);
18277 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
18278 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
18279 udp->uh_dport = tp->t_port;
18280 ulen = hdrlen + len - sizeof(struct ip6_hdr);
18281 udp->uh_ulen = htons(ulen);
18282 th = (struct tcphdr *)(udp + 1);
18284 th = (struct tcphdr *)(ip6 + 1);
18285 tcpip_fillheaders(inp, tp->t_port, ip6, th);
18290 ip = mtod(m, struct ip *);
18292 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
18293 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
18294 udp->uh_dport = tp->t_port;
18295 ulen = hdrlen + len - sizeof(struct ip);
18296 udp->uh_ulen = htons(ulen);
18297 th = (struct tcphdr *)(udp + 1);
18299 th = (struct tcphdr *)(ip + 1);
18300 tcpip_fillheaders(inp, tp->t_port, ip, th);
18305 * Fill in fields, remembering maximum advertised window for use in
18306 * delaying messages about window sizes. If resending a FIN, be sure
18307 * not to use a new sequence number.
18309 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
18310 tp->snd_nxt == tp->snd_max)
18313 * If we are starting a connection, send ECN setup SYN packet. If we
18314 * are on a retransmit, we may resend those bits a number of times
18317 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) {
18318 flags |= tcp_ecn_output_syn_sent(tp);
18320 /* Also handle parallel SYN for ECN */
18321 if (TCPS_HAVERCVDSYN(tp->t_state) &&
18322 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
18323 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit);
18324 if ((tp->t_state == TCPS_SYN_RECEIVED) &&
18325 (tp->t_flags2 & TF2_ECN_SND_ECE))
18326 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
18329 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
18330 ip6->ip6_flow |= htonl(ect << 20);
18336 ip->ip_tos &= ~IPTOS_ECN_MASK;
18342 * If we are doing retransmissions, then snd_nxt will not reflect
18343 * the first unsent octet. For ACK only packets, we do not want the
18344 * sequence number of the retransmitted packet, we want the sequence
18345 * number of the next unsent octet. So, if there is no data (and no
18346 * SYN or FIN), use snd_max instead of snd_nxt when filling in
18347 * ti_seq. But if we are in persist state, snd_max might reflect
18348 * one byte beyond the right edge of the window, so use snd_nxt in
18349 * that case, since we know we aren't doing a retransmission.
18350 * (retransmit and persist are mutually exclusive...)
18352 if (sack_rxmit == 0) {
18353 if (len || (flags & (TH_SYN | TH_FIN))) {
18354 th->th_seq = htonl(tp->snd_nxt);
18355 rack_seq = tp->snd_nxt;
18357 th->th_seq = htonl(tp->snd_max);
18358 rack_seq = tp->snd_max;
18361 th->th_seq = htonl(rsm->r_start);
18362 rack_seq = rsm->r_start;
18364 th->th_ack = htonl(tp->rcv_nxt);
18365 tcp_set_flags(th, flags);
18367 * Calculate receive window. Don't shrink window, but avoid silly
18369 * If a RST segment is sent, advertise a window of zero.
18371 if (flags & TH_RST) {
18374 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
18375 recwin < (long)segsiz) {
18378 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
18379 recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
18380 recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
18384 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or
18385 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is
18386 * handled in syncache.
18388 if (flags & TH_SYN)
18389 th->th_win = htons((u_short)
18390 (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
18392 /* Avoid shrinking window with window scaling. */
18393 recwin = roundup2(recwin, 1 << tp->rcv_scale);
18394 th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
18397 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0
18398 * window. This may cause the remote transmitter to stall. This
18399 * flag tells soreceive() to disable delayed acknowledgements when
18400 * draining the buffer. This can occur if the receiver is
18401 * attempting to read more data than can be buffered prior to
18402 * transmitting on the connection.
18404 if (th->th_win == 0) {
18405 tp->t_sndzerowin++;
18406 tp->t_flags |= TF_RXWIN0SENT;
18408 tp->t_flags &= ~TF_RXWIN0SENT;
18409 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */
18410 /* Now, are we using fsb? If so, copy the template data to the mbuf */
18411 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
18414 cpto = mtod(m, uint8_t *);
18415 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
18417 * We have just copied in:
18419 * <optional udphdr>
18420 * tcphdr (no options)
18422 * We need to grab the correct pointers into the mbuf
18423 * for both the tcp header, and possibly the udp header (if tunneling).
18424 * We do this by using the offset in the copy buffer and adding it
18425 * to the mbuf base pointer (cpto).
18429 ip6 = mtod(m, struct ip6_hdr *);
18433 ip = mtod(m, struct ip *);
18435 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
18436 /* If we have a udp header let's set it into the mbuf as well */
18438 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr));
18440 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
18441 if (to.to_flags & TOF_SIGNATURE) {
18443 * Calculate MD5 signature and put it into the place
18444 * determined before.
18445 * NOTE: since TCP options buffer doesn't point into
18446 * mbuf's data, calculate offset and use it.
18448 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
18449 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
18451 * Do not send segment if the calculation of MD5
18452 * digest has failed.
18459 bcopy(opt, th + 1, optlen);
18460 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
18463 * Put TCP length in extended header, and then checksum extended
18466 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
18470 * ip6_plen need not be filled now, and will be filled
18474 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
18475 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
18476 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
18477 th->th_sum = htons(0);
18478 UDPSTAT_INC(udps_opackets);
18480 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
18481 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
18482 th->th_sum = in6_cksum_pseudo(ip6,
18483 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
18488 #if defined(INET6) && defined(INET)
18494 m->m_pkthdr.csum_flags = CSUM_UDP;
18495 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
18496 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
18497 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
18498 th->th_sum = htons(0);
18499 UDPSTAT_INC(udps_opackets);
18501 m->m_pkthdr.csum_flags = CSUM_TCP;
18502 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
18503 th->th_sum = in_pseudo(ip->ip_src.s_addr,
18504 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
18505 IPPROTO_TCP + len + optlen));
18507 /* IP version must be set here for ipv4/ipv6 checking later */
18508 KASSERT(ip->ip_v == IPVERSION,
18509 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
18513 * Enable TSO and specify the size of the segments. The TCP pseudo
18514 * header checksum is always provided. XXX: Fixme: This is currently
18515 * not the case for IPv6.
18518 KASSERT(len > tp->t_maxseg - optlen,
18519 ("%s: len <= tso_segsz", __func__));
18520 m->m_pkthdr.csum_flags |= CSUM_TSO;
18521 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
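/*
 * tso_segsz tells the driver/NIC the per-segment payload size to use
 * when it splits this oversized frame back into MSS-sized packets.
 */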
18523 KASSERT(len + hdrlen == m_length(m, NULL),
18524 ("%s: mbuf chain different than expected: %d + %u != %u",
18525 __func__, len, hdrlen, m_length(m, NULL)));
18528 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
18529 hhook_run_tcp_est_out(tp, th, &to, len, tso);
18531 /* We're getting ready to send; log now. */
18532 if (tcp_bblogging_on(rack->rc_tp)) {
18533 union tcp_log_stackspecific log;
18535 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
18536 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
18537 if (rack->rack_no_prr)
18538 log.u_bbr.flex1 = 0;
18540 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
18541 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
18542 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
18543 log.u_bbr.flex4 = orig_len;
18544 /* Save off the early/late values */
18545 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
18546 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
18547 log.u_bbr.bw_inuse = rack_get_bw(rack);
18548 log.u_bbr.flex8 = 0;
18550 if (rsm->r_flags & RACK_RWND_COLLAPSED) {
18551 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm);
18552 counter_u64_add(rack_collapsed_win_rxt, 1);
18553 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start));
18556 log.u_bbr.flex8 = 2;
18558 log.u_bbr.flex8 = 1;
18561 log.u_bbr.flex8 = 3;
18563 log.u_bbr.flex8 = 0;
18565 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
18566 log.u_bbr.flex7 = mark;
18567 log.u_bbr.flex7 <<= 8;
18568 log.u_bbr.flex7 |= pass;
18569 log.u_bbr.pkts_out = tp->t_maxseg;
18570 log.u_bbr.timeStamp = cts;
18571 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18572 log.u_bbr.lt_epoch = cwnd_to_use;
18573 log.u_bbr.delivered = sendalot;
18574 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
18575 len, &log, false, NULL, NULL, 0, &tv);
18580 * Fill in IP length and desired time to live and send to IP level.
18581 * There should be a better way to handle ttl and tos; we could keep
18582 * them in the template, but need a way to checksum without them.
18585 * m->m_pkthdr.len should have been set before cksum calculation,
18586 * because in6_cksum() needs it.
18591 * we separately set hoplimit for every segment, since the
18592 * user might want to change the value via setsockopt. Also,
18593 * desired default hop limit might be changed via Neighbor
18596 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL);
18599 * Set the packet size here for the benefit of DTrace
18600 * probes. ip6_output() will set it properly; it's supposed
18601 * to include the option header lengths as well.
18603 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
18605 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
18606 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
18608 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
18610 if (tp->t_state == TCPS_SYN_SENT)
18611 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);
18613 TCP_PROBE5(send, NULL, tp, ip6, tp, th);
18614 /* TODO: IPv6 IP6TOS_ECT bit on */
18615 error = ip6_output(m,
18616 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
18617 inp->in6p_outputopts,
18622 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0),
18625 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
18626 mtu = inp->inp_route6.ro_nh->nh_mtu;
18629 #if defined(INET) && defined(INET6)
18634 ip->ip_len = htons(m->m_pkthdr.len);
18636 if (inp->inp_vflag & INP_IPV6PROTO)
18637 ip->ip_ttl = in6_selecthlim(inp, NULL);
18639 rack->r_ctl.fsb.hoplimit = ip->ip_ttl;
18641 * If we do path MTU discovery, then we set DF on every
18642 * packet. This might not be the best thing to do according
18643 * to RFC3390 Section 2. However the tcp hostcache mitigates
18644 * the problem so it affects only the first tcp connection
18647 * NB: Don't set DF on small MTU/MSS to have a safe
18650 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
18651 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
18652 if (tp->t_port == 0 || len < V_tcp_minmss) {
18653 ip->ip_off |= htons(IP_DF);
18656 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
18659 if (tp->t_state == TCPS_SYN_SENT)
18660 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);
18662 TCP_PROBE5(send, NULL, tp, ip, tp, th);
18664 error = ip_output(m,
18665 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
18671 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0,
18673 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
18674 mtu = inp->inp_route.ro_nh->nh_mtu;
18680 lgb->tlb_errno = error;
18684 * In transmit state, time the transmission and arrange for the
18685 * retransmit. In persist state, just set snd_max.
18688 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls);
18689 if (rsm && doing_tlp) {
18690 rack->rc_last_sent_tlp_past_cumack = 0;
18691 rack->rc_last_sent_tlp_seq_valid = 1;
18692 rack->r_ctl.last_sent_tlp_seq = rsm->r_start;
18693 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start;
18695 rack->forced_ack = 0; /* If we send something zap the FA flag */
18696 if (rsm && (doing_tlp == 0)) {
18697 /* Set we retransmitted */
18698 rack->rc_gp_saw_rec = 1;
18700 if (cwnd_to_use > tp->snd_ssthresh) {
18701 /* Set we sent in CA */
18702 rack->rc_gp_saw_ca = 1;
18704 /* Set we sent in SS */
18705 rack->rc_gp_saw_ss = 1;
18708 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
18709 (tp->t_flags & TF_SACK_PERMIT) &&
18710 tp->rcv_numsacks > 0)
18711 tcp_clean_dsack_blocks(tp);
18712 tot_len_this_send += len;
18714 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1);
18715 else if (len == 1) {
18716 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1);
18717 } else if (len > 1) {
18720 idx = (len / segsiz) + 3;
18721 if (idx >= TCP_MSS_ACCT_ATIMER)
18722 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
18724 counter_u64_add(rack_out_size[idx], 1);
18727 if ((rack->rack_no_prr == 0) &&
18730 if (rack->r_ctl.rc_prr_sndcnt >= len)
18731 rack->r_ctl.rc_prr_sndcnt -= len;
18733 rack->r_ctl.rc_prr_sndcnt = 0;
18737 /* Make sure the TLP is added */
18738 add_flag |= RACK_TLP;
18740 /* If it's a resend without TLP then it must not have the flag */
18741 rsm->r_flags &= ~RACK_TLP;
18743 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error,
18744 rack_to_usec_ts(&tv),
18745 rsm, add_flag, s_mb, s_moff, hw_tls);
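/*
 * rack_log_output() records this transmission (range, flags, timestamp
 * and mbuf position) in the sendmap so that later ACK/SACK processing
 * can do RACK loss detection and RTT measurement against it.
 */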
18748 if ((error == 0) &&
18750 (tp->snd_una == tp->snd_max))
18751 rack->r_ctl.rc_tlp_rxt_last_time = cts;
18753 tcp_seq startseq = tp->snd_nxt;
18755 /* Track our loss count */
18756 if (rsm && (doing_tlp == 0))
18757 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start;
18759 * Advance snd_nxt over sequence space of this segment.
18762 /* We don't log or do anything with errors */
18764 if (doing_tlp == 0) {
18767 * Not a retransmission of some
18768 * sort, new data is going out so
18769 * clear our TLP count and flag.
18771 rack->rc_tlp_in_progress = 0;
18772 rack->r_ctl.rc_tlp_cnt_out = 0;
18776 * We have just sent a TLP, mark that it is true
18777 * and make sure our in progress is set so we
18778 * continue to check the count.
18780 rack->rc_tlp_in_progress = 1;
18781 rack->r_ctl.rc_tlp_cnt_out++;
18783 if (flags & (TH_SYN | TH_FIN)) {
18784 if (flags & TH_SYN)
18786 if (flags & TH_FIN) {
18788 tp->t_flags |= TF_SENTFIN;
18791 /* In the ENOBUFS case we do *not* update snd_max */
18795 tp->snd_nxt += len;
18796 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
18797 if (tp->snd_una == tp->snd_max) {
18799 * Update the time we just added data since
18800 * none was outstanding.
18802 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
18803 tp->t_acktime = ticks;
18805 tp->snd_max = tp->snd_nxt;
18807 * Time this transmission if not a retransmission and
18808 * not currently timing anything.
18809 * This is only relevant in case of switching back to
18812 if (tp->t_rtttime == 0) {
18813 tp->t_rtttime = ticks;
18814 tp->t_rtseq = startseq;
18815 KMOD_TCPSTAT_INC(tcps_segstimed);
18818 ((tp->t_flags & TF_GPUTINPROG) == 0))
18819 rack_start_gp_measurement(tp, rack, startseq, sb_offset);
18822 * If we are doing FO we need to update the mbuf position and subtract
18823 * the length sent. This happens when the peer sends us duplicate
18824 * information and we thus want to send a DSACK.
18826 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO
18827 * turned off? If not then we are going to echo multiple DSACK blocks
18828 * out (with the TSO), which we should not be doing.
18830 if (rack->r_fast_output && len) {
18831 if (rack->r_ctl.fsb.left_to_send > len)
18832 rack->r_ctl.fsb.left_to_send -= len;
18834 rack->r_ctl.fsb.left_to_send = 0;
18835 if (rack->r_ctl.fsb.left_to_send < segsiz)
18836 rack->r_fast_output = 0;
18837 if (rack->r_fast_output) {
18838 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
18839 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
18845 rack->r_ctl.rc_agg_delayed = 0;
18848 rack->r_ctl.rc_agg_early = 0;
18849 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */
18851 * Failures do not advance the seq counter above. For the
18852 * case of ENOBUFS we will fall out and retry in 1ms with
18853 * the hpts. Everything else will just have to retransmit
18856 * In any case, we do not want to loop around for another
18857 * send without a good reason.
18862 tp->t_softerror = error;
18863 #ifdef TCP_ACCOUNTING
18864 crtsc = get_cyclecount();
18865 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18866 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
18868 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18869 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
18876 * Pace us right away to retry in some
18879 if (rack->r_ctl.crte != NULL) {
18880 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF);
18882 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF);
18883 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
18884 if (rack->rc_enobuf < 0x7f)
18886 if (slot < (10 * HPTS_USEC_IN_MSEC))
18887 slot = 10 * HPTS_USEC_IN_MSEC;
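/*
 * Each consecutive ENOBUFS adds a millisecond to the retry delay, with a
 * 10 ms floor, before the hpts calls us back to try again.
 */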
18888 if (rack->r_ctl.crte != NULL) {
18889 counter_u64_add(rack_saw_enobuf_hw, 1);
18890 tcp_rl_log_enobuf(rack->r_ctl.crte);
18892 counter_u64_add(rack_saw_enobuf, 1);
18896 * For some reason the interface we used initially
18897 * to send segments changed to another or lowered
18898 * its MTU. If TSO was active we either got an
18899 * interface without TSO capabilities or TSO was
18900 * turned off. If we obtained mtu from ip_output()
18901 * then update it and try again.
18904 tp->t_flags &= ~TF_TSO;
18906 tcp_mss_update(tp, -1, mtu, NULL, NULL);
18909 slot = 10 * HPTS_USEC_IN_MSEC;
18910 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
18911 #ifdef TCP_ACCOUNTING
18912 crtsc = get_cyclecount();
18913 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18914 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
18916 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18917 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
18923 counter_u64_add(rack_saw_enetunreach, 1);
18927 if (TCPS_HAVERCVDSYN(tp->t_state)) {
18928 tp->t_softerror = error;
18932 slot = 10 * HPTS_USEC_IN_MSEC;
18933 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
18934 #ifdef TCP_ACCOUNTING
18935 crtsc = get_cyclecount();
18936 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18937 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
18939 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18940 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
18947 rack->rc_enobuf = 0;
18948 if (IN_FASTRECOVERY(tp->t_flags) && rsm)
18949 rack->r_ctl.retran_during_recovery += len;
18951 KMOD_TCPSTAT_INC(tcps_sndtotal);
18954 * Data sent (as far as we can tell). If this advertises a larger
18955 * window than any other segment, then remember the size of the
18956 * advertised window. Any pending ACK has now been sent.
18958 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
18959 tp->rcv_adv = tp->rcv_nxt + recwin;
18961 tp->last_ack_sent = tp->rcv_nxt;
18962 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
18965 /* Do we need to turn off sendalot? */
18966 if (rack->r_ctl.rc_pace_max_segs &&
18967 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) {
18968 /* We hit our max. */
18970 } else if ((rack->rc_user_set_max_segs) &&
18971 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) {
18972 /* We hit the user defined max */
18976 if ((error == 0) && (flags & TH_FIN))
18977 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN);
18978 if (flags & TH_RST) {
18980 * We don't send again after sending a RST.
18985 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
18986 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) {
18988 * Get our pacing rate; if an error
18989 * occurred in sending (ENOBUFS) we would
18990 * hit the else if with slot preset. Other
18993 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz);
18996 (rsm->r_flags & RACK_HAS_SYN) == 0 &&
18997 rack->use_rack_rr) {
18998 /* It's a retransmit and we use the rack cheat? */
19000 (rack->rc_always_pace == 0) ||
19001 (rack->r_rr_config == 1)) {
19003 * We have no pacing set or we
19004 * are using old-style rack or
19005 * we are overridden to use the old 1ms pacing.
19007 slot = rack->r_ctl.rc_min_to;
19010 /* We have sent; clear the flag */
19011 rack->r_ent_rec_ns = 0;
19012 if (rack->r_must_retran) {
19014 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
19015 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
19017 * We have retransmitted all.
19019 rack->r_must_retran = 0;
19020 rack->r_ctl.rc_out_at_rto = 0;
19022 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
19024 * Sending new data will also kill
19027 rack->r_must_retran = 0;
19028 rack->r_ctl.rc_out_at_rto = 0;
19031 rack->r_ctl.fsb.recwin = recwin;
19032 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) &&
19033 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
19035 * We hit an RTO and have now passed snd_max at the RTO;
19036 * clear all the WAS flags.
19038 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY);
19041 /* set the rack tcb into the slot N */
19042 if ((error == 0) &&
19044 ((flags & (TH_SYN|TH_FIN)) == 0) &&
19046 (tp->snd_nxt == tp->snd_max) &&
19048 (tp->rcv_numsacks == 0) &&
19049 rack->r_fsb_inited &&
19050 TCPS_HAVEESTABLISHED(tp->t_state) &&
19051 (rack->r_must_retran == 0) &&
19052 ((tp->t_flags & TF_NEEDFIN) == 0) &&
19053 (len > 0) && (orig_len > 0) &&
19054 (orig_len > len) &&
19055 ((orig_len - len) >= segsiz) &&
19057 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
19058 /* We can send at least one more MSS using our fsb */
19060 rack->r_fast_output = 1;
19061 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
19062 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
19063 rack->r_ctl.fsb.tcp_flags = flags;
19064 rack->r_ctl.fsb.left_to_send = orig_len - len;
19066 rack->r_ctl.fsb.hw_tls = 1;
19068 rack->r_ctl.fsb.hw_tls = 0;
19069 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
19070 ("rack:%p left_to_send:%u sbavail:%u out:%u",
19071 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
19072 (tp->snd_max - tp->snd_una)));
19073 if (rack->r_ctl.fsb.left_to_send < segsiz)
19074 rack->r_fast_output = 0;
19076 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
19077 rack->r_ctl.fsb.rfo_apply_push = 1;
19079 rack->r_ctl.fsb.rfo_apply_push = 0;
19082 rack->r_fast_output = 0;
19083 rack_log_fsb(rack, tp, so, flags,
19084 ipoptlen, orig_len, len, error,
19085 (rsm == NULL), optlen, __LINE__, 2);
19086 } else if (sendalot) {
19090 if ((error == 0) &&
19092 ((flags & (TH_SYN|TH_FIN)) == 0) &&
19095 (tp->rcv_numsacks == 0) &&
19096 (tp->snd_nxt == tp->snd_max) &&
19097 (rack->r_must_retran == 0) &&
19098 rack->r_fsb_inited &&
19099 TCPS_HAVEESTABLISHED(tp->t_state) &&
19100 ((tp->t_flags & TF_NEEDFIN) == 0) &&
19101 (len > 0) && (orig_len > 0) &&
19102 (orig_len > len) &&
19103 ((orig_len - len) >= segsiz) &&
19105 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
19106 /* we can use fast_output for more */
19108 rack->r_fast_output = 1;
19109 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
19110 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
19111 rack->r_ctl.fsb.tcp_flags = flags;
19112 rack->r_ctl.fsb.left_to_send = orig_len - len;
19114 rack->r_ctl.fsb.hw_tls = 1;
19116 rack->r_ctl.fsb.hw_tls = 0;
19117 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
19118 ("rack:%p left_to_send:%u sbavail:%u out:%u",
19119 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
19120 (tp->snd_max - tp->snd_una)));
19121 if (rack->r_ctl.fsb.left_to_send < segsiz) {
19122 rack->r_fast_output = 0;
19124 if (rack->r_fast_output) {
19125 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
19126 rack->r_ctl.fsb.rfo_apply_push = 1;
19128 rack->r_ctl.fsb.rfo_apply_push = 0;
19129 rack_log_fsb(rack, tp, so, flags,
19130 ipoptlen, orig_len, len, error,
19131 (rsm == NULL), optlen, __LINE__, 3);
19133 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
19143 /* Ensure that snd_nxt points to the top when we leave */
19144 if (SEQ_GT(tp->snd_max, tp->snd_nxt))
19145 tp->snd_nxt = tp->snd_max;
19146 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0);
19147 #ifdef TCP_ACCOUNTING
19148 crtsc = get_cyclecount() - ts_val;
19149 if (tot_len_this_send) {
19150 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19151 tp->tcp_cnt_counters[SND_OUT_DATA]++;
19153 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19154 tp->tcp_proc_time[SND_OUT_DATA] += crtsc;
19156 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19157 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz);
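/*
 * The (tot_len_this_send + segsiz - 1) / segsiz form rounds up, so a
 * trailing partial segment is still counted as a full MSS.
 */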
19160 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19161 tp->tcp_cnt_counters[SND_OUT_ACK]++;
19163 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19164 tp->tcp_proc_time[SND_OUT_ACK] += crtsc;
19169 if (error == ENOBUFS)
19175 rack_update_seg(struct tcp_rack *rack)
19179 orig_val = rack->r_ctl.rc_pace_max_segs;
19180 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
19181 if (orig_val != rack->r_ctl.rc_pace_max_segs)
19182 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0);
19186 rack_mtu_change(struct tcpcb *tp)
19189 * The MSS may have changed
19191 struct tcp_rack *rack;
19192 struct rack_sendmap *rsm;
19194 rack = (struct tcp_rack *)tp->t_fb_ptr;
19195 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) {
19197 * The MTU has changed, so we need to resend everything
19198 * since all we have sent is lost. We first fix
19199 * up the MTU though.
19201 rack_set_pace_segments(tp, rack, __LINE__, NULL);
19202 /* We treat this like a full retransmit timeout without the cwnd adjustment */
19203 rack_remxt_tmr(tp);
19204 rack->r_fast_output = 0;
19205 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp,
19206 rack->r_ctl.rc_sacked);
19207 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
19208 rack->r_must_retran = 1;
19209 /* Mark all inflight to needing to be rxt'd */
19210 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
19211 rsm->r_flags |= RACK_MUST_RXT;
19214 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
19215 /* We don't use snd_nxt to retransmit */
19216 tp->snd_nxt = tp->snd_max;
19220 rack_set_profile(struct tcp_rack *rack, int prof)
19224 /* pace_always=1 */
19225 if (rack->rc_always_pace == 0) {
19226 if (tcp_can_enable_pacing() == 0)
19229 rack->rc_always_pace = 1;
19230 if (rack->use_fixed_rate || rack->gp_ready)
19231 rack_set_cc_pacing(rack);
19232 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19233 rack->rack_attempt_hdwr_pace = 0;
19235 if (rack_use_cmp_acks)
19236 rack->r_use_cmp_ack = 1;
19237 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) &&
19238 rack->r_use_cmp_ack)
19239 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
19241 rack->rack_enable_scwnd = 1;
19243 rack->rc_gp_dyn_mul = 1;
19245 rack->r_ctl.rack_per_of_gp_ca = 100;
19247 rack->r_rr_config = 3;
19249 rack->r_ctl.rc_no_push_at_mrtt = 2;
19251 rack->rc_pace_to_cwnd = 1;
19252 rack->rc_pace_fill_if_rttin_range = 0;
19253 rack->rtt_limit_mul = 0;
19255 rack->rack_no_prr = 1;
19257 rack->r_limit_scw = 1;
19259 rack->r_ctl.rack_per_of_gp_rec = 90;
19262 } else if (prof == 3) {
19263 /* Same as profile one except fill_cw becomes 2 (less aggressive set) */
19264 /* pace_always=1 */
19265 if (rack->rc_always_pace == 0) {
19266 if (tcp_can_enable_pacing() == 0)
19269 rack->rc_always_pace = 1;
19270 if (rack->use_fixed_rate || rack->gp_ready)
19271 rack_set_cc_pacing(rack);
19272 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19273 rack->rack_attempt_hdwr_pace = 0;
19275 if (rack_use_cmp_acks)
19276 rack->r_use_cmp_ack = 1;
19277 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) &&
19278 rack->r_use_cmp_ack)
19279 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
19281 rack->rack_enable_scwnd = 1;
19283 rack->rc_gp_dyn_mul = 1;
19285 rack->r_ctl.rack_per_of_gp_ca = 100;
19287 rack->r_rr_config = 3;
19289 rack->r_ctl.rc_no_push_at_mrtt = 2;
19291 rack->rc_pace_to_cwnd = 1;
19292 rack->r_fill_less_agg = 1;
19293 rack->rc_pace_fill_if_rttin_range = 0;
19294 rack->rtt_limit_mul = 0;
19296 rack->rack_no_prr = 1;
19298 rack->r_limit_scw = 1;
19300 rack->r_ctl.rack_per_of_gp_rec = 90;
19304 } else if (prof == 2) {
19306 if (rack->rc_always_pace == 0) {
19307 if (tcp_can_enable_pacing() == 0)
19310 rack->rc_always_pace = 1;
19311 if (rack->use_fixed_rate || rack->gp_ready)
19312 rack_set_cc_pacing(rack);
19313 rack->r_use_cmp_ack = 1;
19314 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state))
19315 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
19316 /* pace_always=1 */
19317 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19319 rack->rack_enable_scwnd = 1;
19321 rack->rc_gp_dyn_mul = 1;
19322 rack->r_ctl.rack_per_of_gp_ca = 100;
19324 rack->r_rr_config = 3;
19326 rack->r_ctl.rc_no_push_at_mrtt = 2;
19328 rack->rc_pace_to_cwnd = 1;
19329 rack->rc_pace_fill_if_rttin_range = 0;
19330 rack->rtt_limit_mul = 0;
19332 rack->rack_no_prr = 1;
19334 rack->r_limit_scw = 0;
19336 } else if (prof == 0) {
19337 /* This changes things back to the default settings */
19339 if (rack->rc_always_pace) {
19340 tcp_decrement_paced_conn();
19341 rack_undo_cc_pacing(rack);
19342 rack->rc_always_pace = 0;
19344 if (rack_pace_every_seg && tcp_can_enable_pacing()) {
19345 rack->rc_always_pace = 1;
19346 if (rack->use_fixed_rate || rack->gp_ready)
19347 rack_set_cc_pacing(rack);
19349 rack->rc_always_pace = 0;
19350 if (rack_dsack_std_based & 0x1) {
19351 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */
19352 rack->rc_rack_tmr_std_based = 1;
19354 if (rack_dsack_std_based & 0x2) {
19355 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */
19356 rack->rc_rack_use_dsack = 1;
19358 if (rack_use_cmp_acks)
19359 rack->r_use_cmp_ack = 1;
19361 rack->r_use_cmp_ack = 0;
19362 if (rack_disable_prr)
19363 rack->rack_no_prr = 1;
19365 rack->rack_no_prr = 0;
19366 if (rack_gp_no_rec_chg)
19367 rack->rc_gp_no_rec_chg = 1;
19369 rack->rc_gp_no_rec_chg = 0;
19370 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) {
19371 rack->r_mbuf_queue = 1;
19372 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state))
19373 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
19374 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19376 rack->r_mbuf_queue = 0;
19377 rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
19379 if (rack_enable_shared_cwnd)
19380 rack->rack_enable_scwnd = 1;
19382 rack->rack_enable_scwnd = 0;
19383 if (rack_do_dyn_mul) {
19384 /* When dynamic adjustment is on, CA needs to start at 100% */
19385 rack->rc_gp_dyn_mul = 1;
19386 if (rack_do_dyn_mul >= 100)
19387 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
19389 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
19390 rack->rc_gp_dyn_mul = 0;
19392 rack->r_rr_config = 0;
19393 rack->r_ctl.rc_no_push_at_mrtt = 0;
19394 rack->rc_pace_to_cwnd = 0;
19395 rack->rc_pace_fill_if_rttin_range = 0;
19396 rack->rtt_limit_mul = 0;
19398 if (rack_enable_hw_pacing)
19399 rack->rack_hdw_pace_ena = 1;
19401 rack->rack_hdw_pace_ena = 0;
19402 if (rack_disable_prr)
19403 rack->rack_no_prr = 1;
19405 rack->rack_no_prr = 0;
19406 if (rack_limits_scwnd)
19407 rack->r_limit_scw = 1;
19409 rack->r_limit_scw = 0;
19416 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval)
19418 struct deferred_opt_list *dol;
19420 dol = malloc(sizeof(struct deferred_opt_list),
19421 M_TCPFSB, M_NOWAIT|M_ZERO);
19424 * No space, yikes -- fail out.
19428 dol->optname = sopt_name;
19429 dol->optval = loptval;
19430 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next);
19435 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name,
19436 uint32_t optval, uint64_t loptval)
19438 struct epoch_tracker et;
19439 struct sockopt sopt;
19440 struct cc_newreno_opts opt;
19441 struct inpcb *inp = tptoinpcb(tp);
19446 switch (sopt_name) {
19448 case TCP_RACK_DSACK_OPT:
19449 RACK_OPTS_INC(tcp_rack_dsack_opt);
19450 if (optval & 0x1) {
19451 rack->rc_rack_tmr_std_based = 1;
19453 rack->rc_rack_tmr_std_based = 0;
19455 if (optval & 0x2) {
19456 rack->rc_rack_use_dsack = 1;
19458 rack->rc_rack_use_dsack = 0;
19460 rack_log_dsack_event(rack, 5, __LINE__, 0, 0);
19462 case TCP_RACK_PACING_BETA:
19463 RACK_OPTS_INC(tcp_rack_beta);
19464 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
19465 /* This only works for newreno. */
19469 if (rack->rc_pacing_cc_set) {
19471 * Set them into the real CC module;
19472 * what's in the rack pcb are the old values
19473 * to be used on restoral.
19475 sopt.sopt_dir = SOPT_SET;
19476 opt.name = CC_NEWRENO_BETA;
19478 if (CC_ALGO(tp)->ctl_output != NULL)
19479 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
19486 * Not pacing yet so set it into our local
19487 * rack pcb storage.
19489 rack->r_ctl.rc_saved_beta.beta = optval;
19492 case TCP_RACK_TIMER_SLOP:
19493 RACK_OPTS_INC(tcp_rack_timer_slop);
19494 rack->r_ctl.timer_slop = optval;
19495 if (rack->rc_tp->t_srtt) {
19497 * If we have an SRTT, let's update t_rxtcur
19498 * to have the new slop.
19500 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
19501 rack_rto_min, rack_rto_max,
19502 rack->r_ctl.timer_slop);
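/*
 * As the macro arguments suggest, this recomputes t_rxtcur from the current
 * RTT estimate, bounded by rack_rto_min/rack_rto_max and taking the new
 * slop into account.
 */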
19505 case TCP_RACK_PACING_BETA_ECN:
19506 RACK_OPTS_INC(tcp_rack_beta_ecn);
19507 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
19508 /* This only works for newreno. */
19512 if (rack->rc_pacing_cc_set) {
19514 * Set them into the real CC module;
19515 * what's in the rack pcb are the old values
19516 * to be used on restoral.
19518 sopt.sopt_dir = SOPT_SET;
19519 opt.name = CC_NEWRENO_BETA_ECN;
19521 if (CC_ALGO(tp)->ctl_output != NULL)
19522 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
19527 * Not pacing yet so set it into our local
19528 * rack pcb storage.
19530 rack->r_ctl.rc_saved_beta.beta_ecn = optval;
19531 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED;
19534 case TCP_DEFER_OPTIONS:
19535 RACK_OPTS_INC(tcp_defer_opt);
19537 if (rack->gp_ready) {
19542 rack->defer_options = 1;
19544 rack->defer_options = 0;
19546 case TCP_RACK_MEASURE_CNT:
19547 RACK_OPTS_INC(tcp_rack_measure_cnt);
19548 if (optval && (optval <= 0xff)) {
19549 rack->r_ctl.req_measurements = optval;
19553 case TCP_REC_ABC_VAL:
19554 RACK_OPTS_INC(tcp_rec_abc_val);
19556 rack->r_use_labc_for_rec = 1;
19558 rack->r_use_labc_for_rec = 0;
19560 case TCP_RACK_ABC_VAL:
19561 RACK_OPTS_INC(tcp_rack_abc_val);
19562 if ((optval > 0) && (optval < 255))
19563 rack->rc_labc = optval;
19567 case TCP_HDWR_UP_ONLY:
19568 RACK_OPTS_INC(tcp_pacing_up_only);
19570 rack->r_up_only = 1;
19572 rack->r_up_only = 0;
19574 case TCP_PACING_RATE_CAP:
19575 RACK_OPTS_INC(tcp_pacing_rate_cap);
19576 rack->r_ctl.bw_rate_cap = loptval;
19578 case TCP_RACK_PROFILE:
19579 RACK_OPTS_INC(tcp_profile);
19580 error = rack_set_profile(rack, optval);
19582 case TCP_USE_CMP_ACKS:
19583 RACK_OPTS_INC(tcp_use_cmp_acks);
19584 if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) {
19585 /* You can't turn it off once its on! */
19587 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) {
19588 rack->r_use_cmp_ack = 1;
19589 rack->r_mbuf_queue = 1;
19590 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19592 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
19593 inp->inp_flags2 |= INP_MBUF_ACKCMP;
19595 case TCP_SHARED_CWND_TIME_LIMIT:
19596 RACK_OPTS_INC(tcp_lscwnd);
19598 rack->r_limit_scw = 1;
19600 rack->r_limit_scw = 0;
19602 case TCP_RACK_PACE_TO_FILL:
19603 RACK_OPTS_INC(tcp_fillcw);
19605 rack->rc_pace_to_cwnd = 0;
19607 rack->rc_pace_to_cwnd = 1;
19609 rack->r_fill_less_agg = 1;
19611 if ((optval >= rack_gp_rtt_maxmul) &&
19612 rack_gp_rtt_maxmul &&
19614 rack->rc_pace_fill_if_rttin_range = 1;
19615 rack->rtt_limit_mul = optval;
19617 rack->rc_pace_fill_if_rttin_range = 0;
19618 rack->rtt_limit_mul = 0;
19621 case TCP_RACK_NO_PUSH_AT_MAX:
19622 RACK_OPTS_INC(tcp_npush);
19624 rack->r_ctl.rc_no_push_at_mrtt = 0;
19625 else if (optval < 0xff)
19626 rack->r_ctl.rc_no_push_at_mrtt = optval;
19630 case TCP_SHARED_CWND_ENABLE:
19631 RACK_OPTS_INC(tcp_rack_scwnd);
19633 rack->rack_enable_scwnd = 0;
19635 rack->rack_enable_scwnd = 1;
19637 case TCP_RACK_MBUF_QUEUE:
19638 /* Now do we use the LRO mbuf-queue feature */
19639 RACK_OPTS_INC(tcp_rack_mbufq);
19640 if (optval || rack->r_use_cmp_ack)
19641 rack->r_mbuf_queue = 1;
19643 rack->r_mbuf_queue = 0;
19644 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
19645 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19647 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
19649 case TCP_RACK_NONRXT_CFG_RATE:
19650 RACK_OPTS_INC(tcp_rack_cfg_rate);
19652 rack->rack_rec_nonrxt_use_cr = 0;
19654 rack->rack_rec_nonrxt_use_cr = 1;
19657 RACK_OPTS_INC(tcp_rack_noprr);
19659 rack->rack_no_prr = 0;
19660 else if (optval == 1)
19661 rack->rack_no_prr = 1;
19662 else if (optval == 2)
19663 rack->no_prr_addback = 1;
19667 case TCP_TIMELY_DYN_ADJ:
19668 RACK_OPTS_INC(tcp_timely_dyn);
19670 rack->rc_gp_dyn_mul = 0;
19672 rack->rc_gp_dyn_mul = 1;
19673 if (optval >= 100) {
19675 * If the user sets something to 100 or more
19676 * it's the gp_ca value.
19678 rack->r_ctl.rack_per_of_gp_ca = optval;
19682 case TCP_RACK_DO_DETECTION:
19683 RACK_OPTS_INC(tcp_rack_do_detection);
19685 rack->do_detection = 0;
19687 rack->do_detection = 1;
19689 case TCP_RACK_TLP_USE:
19690 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) {
19694 RACK_OPTS_INC(tcp_tlp_use);
19695 rack->rack_tlp_threshold_use = optval;
19697 case TCP_RACK_TLP_REDUCE:
19698 /* RACK TLP cwnd reduction (bool) */
19699 RACK_OPTS_INC(tcp_rack_tlp_reduce);
19700 rack->r_ctl.rc_tlp_cwnd_reduce = optval;
19702 /* Pacing related ones */
19703 case TCP_RACK_PACE_ALWAYS:
19705 * Zero is the old rack method, 1 is the new
19706 * method using a pacing rate.
19708 RACK_OPTS_INC(tcp_rack_pace_always);
19710 if (rack->rc_always_pace) {
19713 } else if (tcp_can_enable_pacing()) {
19714 rack->rc_always_pace = 1;
19715 if (rack->use_fixed_rate || rack->gp_ready)
19716 rack_set_cc_pacing(rack);
19723 if (rack->rc_always_pace) {
19724 tcp_decrement_paced_conn();
19725 rack->rc_always_pace = 0;
19726 rack_undo_cc_pacing(rack);
19729 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
19730 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19732 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
19733 /* A rate may already be set (irate or otherwise); if so, set the seg size */
19734 rack_update_seg(rack);
19736 case TCP_BBR_RACK_INIT_RATE:
19737 RACK_OPTS_INC(tcp_initial_rate);
19739 /* Change from kbits per second to bytes per second */
19742 rack->r_ctl.init_rate = val;
19743 if (rack->rc_init_win != rack_default_init_window) {
19747 * Options don't always get applied
19748 * in the order you think. So in order
19749 * to ensure we update the cwnd we need
19750 * to check and see if we are still
19751 * where we should raise the cwnd.
19753 win = rc_init_window(rack);
19754 if (SEQ_GT(tp->snd_max, tp->iss))
19755 snt = tp->snd_max - tp->iss;
19759 (tp->snd_cwnd < win))
19760 tp->snd_cwnd = win;
19762 if (rack->rc_always_pace)
19763 rack_update_seg(rack);
19765 case TCP_BBR_IWINTSO:
19766 RACK_OPTS_INC(tcp_initial_win);
19767 if (optval && (optval <= 0xff)) {
19770 rack->rc_init_win = optval;
19771 win = rc_init_window(rack);
19772 if (SEQ_GT(tp->snd_max, tp->iss))
19773 snt = tp->snd_max - tp->iss;
19778 #ifdef NETFLIX_PEAKRATE
19779 tp->t_maxpeakrate |
19781 rack->r_ctl.init_rate)) {
19783 * We are not past the initial window
19784 * and we have some bases for pacing,
19785 * so we need to possibly adjust up
19786 * the cwnd. Note that even if we don't set
19787 * the cwnd, it's still ok to raise the rc_init_win
19788 * which can be used coming out of idle when we
19789 * would have a rate.
19791 if (tp->snd_cwnd < win)
19792 tp->snd_cwnd = win;
19794 if (rack->rc_always_pace)
19795 rack_update_seg(rack);
19799 case TCP_RACK_FORCE_MSEG:
19800 RACK_OPTS_INC(tcp_rack_force_max_seg);
19802 rack->rc_force_max_seg = 1;
19804 rack->rc_force_max_seg = 0;
19806 case TCP_RACK_PACE_MAX_SEG:
19807 /* Max segments size in a pace in bytes */
19808 RACK_OPTS_INC(tcp_rack_max_seg);
19809 rack->rc_user_set_max_segs = optval;
19810 rack_set_pace_segments(tp, rack, __LINE__, NULL);
19812 case TCP_RACK_PACE_RATE_REC:
19813 /* Set the fixed pacing rate in Bytes per second rec */
19814 RACK_OPTS_INC(tcp_rack_pace_rate_rec);
19815 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
19816 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
19817 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
19818 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
19819 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
19820 rack->use_fixed_rate = 1;
19821 if (rack->rc_always_pace)
19822 rack_set_cc_pacing(rack);
19823 rack_log_pacing_delay_calc(rack,
19824 rack->r_ctl.rc_fixed_pacing_rate_ss,
19825 rack->r_ctl.rc_fixed_pacing_rate_ca,
19826 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
19830 case TCP_RACK_PACE_RATE_SS:
19831 /* Set the fixed pacing rate in Bytes per second ss */
19832 RACK_OPTS_INC(tcp_rack_pace_rate_ss);
19833 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
19834 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
19835 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
19836 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
19837 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
19838 rack->use_fixed_rate = 1;
19839 if (rack->rc_always_pace)
19840 rack_set_cc_pacing(rack);
19841 rack_log_pacing_delay_calc(rack,
19842 rack->r_ctl.rc_fixed_pacing_rate_ss,
19843 rack->r_ctl.rc_fixed_pacing_rate_ca,
19844 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
19845 __LINE__, NULL, 0);
19848 case TCP_RACK_PACE_RATE_CA:
19849 /* Set the fixed pacing rate in Bytes per second ca */
19850 RACK_OPTS_INC(tcp_rack_pace_rate_ca);
19851 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
19852 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
19853 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
19854 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
19855 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
19856 rack->use_fixed_rate = 1;
19857 if (rack->rc_always_pace)
19858 rack_set_cc_pacing(rack);
19859 rack_log_pacing_delay_calc(rack,
19860 rack->r_ctl.rc_fixed_pacing_rate_ss,
19861 rack->r_ctl.rc_fixed_pacing_rate_ca,
19862 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
19863 __LINE__, NULL, 0);
19865 case TCP_RACK_GP_INCREASE_REC:
19866 RACK_OPTS_INC(tcp_gp_inc_rec);
19867 rack->r_ctl.rack_per_of_gp_rec = optval;
19868 rack_log_pacing_delay_calc(rack,
19869 rack->r_ctl.rack_per_of_gp_ss,
19870 rack->r_ctl.rack_per_of_gp_ca,
19871 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
19872 __LINE__, NULL, 0);
19874 case TCP_RACK_GP_INCREASE_CA:
19875 RACK_OPTS_INC(tcp_gp_inc_ca);
19879 * We don't allow any reduction
19885 rack->r_ctl.rack_per_of_gp_ca = ca;
19886 rack_log_pacing_delay_calc(rack,
19887 rack->r_ctl.rack_per_of_gp_ss,
19888 rack->r_ctl.rack_per_of_gp_ca,
19889 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
19890 __LINE__, NULL, 0);
19892 case TCP_RACK_GP_INCREASE_SS:
19893 RACK_OPTS_INC(tcp_gp_inc_ss);
19897 * We don't allow any reduction
19903 rack->r_ctl.rack_per_of_gp_ss = ss;
19904 rack_log_pacing_delay_calc(rack,
19905 rack->r_ctl.rack_per_of_gp_ss,
19906 rack->r_ctl.rack_per_of_gp_ca,
19907 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
19908 __LINE__, NULL, 0);
19910 case TCP_RACK_RR_CONF:
19911 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate);
19912 if (optval && optval <= 3)
19913 rack->r_rr_config = optval;
19915 rack->r_rr_config = 0;
19917 case TCP_HDWR_RATE_CAP:
19918 RACK_OPTS_INC(tcp_hdwr_rate_cap);
19920 if (rack->r_rack_hw_rate_caps == 0)
19921 rack->r_rack_hw_rate_caps = 1;
19925 rack->r_rack_hw_rate_caps = 0;
19928 case TCP_BBR_HDWR_PACE:
19929 RACK_OPTS_INC(tcp_hdwr_pacing);
19931 if (rack->rack_hdrw_pacing == 0) {
19932 rack->rack_hdw_pace_ena = 1;
19933 rack->rack_attempt_hdwr_pace = 0;
19937 rack->rack_hdw_pace_ena = 0;
19939 if (rack->r_ctl.crte != NULL) {
19940 rack->rack_hdrw_pacing = 0;
19941 rack->rack_attempt_hdwr_pace = 0;
19942 tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
19943 rack->r_ctl.crte = NULL;
19948 /* End Pacing related ones */
19949 case TCP_RACK_PRR_SENDALOT:
19950 /* Allow PRR to send more than one seg */
19951 RACK_OPTS_INC(tcp_rack_prr_sendalot);
19952 rack->r_ctl.rc_prr_sendalot = optval;
19954 case TCP_RACK_MIN_TO:
19955 /* Minimum time between rack t-o's in ms */
19956 RACK_OPTS_INC(tcp_rack_min_to);
19957 rack->r_ctl.rc_min_to = optval;
19959 case TCP_RACK_EARLY_SEG:
19960 /* Max segments allowed in early recovery */
19961 RACK_OPTS_INC(tcp_rack_early_seg);
19962 rack->r_ctl.rc_early_recovery_segs = optval;
19964 case TCP_RACK_ENABLE_HYSTART:
19967 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED;
19968 if (rack_do_hystart > RACK_HYSTART_ON)
19969 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND;
19970 if (rack_do_hystart > RACK_HYSTART_ON_W_SC)
19971 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH;
19973 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH);
19977 case TCP_RACK_REORD_THRESH:
19978 /* RACK reorder threshold (shift amount) */
19979 RACK_OPTS_INC(tcp_rack_reord_thresh);
19980 if ((optval > 0) && (optval < 31))
19981 rack->r_ctl.rc_reorder_shift = optval;
19985 case TCP_RACK_REORD_FADE:
19986 /* Does reordering fade after ms time */
19987 RACK_OPTS_INC(tcp_rack_reord_fade);
19988 rack->r_ctl.rc_reorder_fade = optval;
19990 case TCP_RACK_TLP_THRESH:
19991 /* RACK TLP threshold i.e. srtt+(srtt/N) */
19992 RACK_OPTS_INC(tcp_rack_tlp_thresh);
19994 rack->r_ctl.rc_tlp_threshold = optval;
19998 case TCP_BBR_USE_RACK_RR:
19999 RACK_OPTS_INC(tcp_rack_rr);
20001 rack->use_rack_rr = 1;
20003 rack->use_rack_rr = 0;
20005 case TCP_FAST_RSM_HACK:
20006 RACK_OPTS_INC(tcp_rack_fastrsm_hack);
20008 rack->fast_rsm_hack = 1;
20010 rack->fast_rsm_hack = 0;
20012 case TCP_RACK_PKT_DELAY:
20013 /* RACK added ms i.e. rack-rtt + reord + N */
20014 RACK_OPTS_INC(tcp_rack_pkt_delay);
20015 rack->r_ctl.rc_pkt_delay = optval;
20018 RACK_OPTS_INC(tcp_rack_delayed_ack);
20020 tp->t_delayed_ack = 0;
20022 tp->t_delayed_ack = 1;
20023 if (tp->t_flags & TF_DELACK) {
20024 tp->t_flags &= ~TF_DELACK;
20025 tp->t_flags |= TF_ACKNOW;
20026 NET_EPOCH_ENTER(et);
20028 NET_EPOCH_EXIT(et);
20032 case TCP_BBR_RACK_RTT_USE:
20033 RACK_OPTS_INC(tcp_rack_rtt_use);
20034 if ((optval != USE_RTT_HIGH) &&
20035 (optval != USE_RTT_LOW) &&
20036 (optval != USE_RTT_AVG))
20039 rack->r_ctl.rc_rate_sample_method = optval;
20041 case TCP_DATA_AFTER_CLOSE:
20042 RACK_OPTS_INC(tcp_data_after_close);
20044 rack->rc_allow_data_af_clo = 1;
20046 rack->rc_allow_data_af_clo = 0;
20051 #ifdef NETFLIX_STATS
20052 tcp_log_socket_option(tp, sopt_name, optval, error);
20059 rack_apply_deferred_options(struct tcp_rack *rack)
20061 struct deferred_opt_list *dol, *sdol;
20064 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) {
20065 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
20066 /* Disadvantage of deferral is you lose the error return */
20067 s_optval = (uint32_t)dol->optval;
20068 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval);
20069 free(dol, M_TCPDO);
20074 rack_hw_tls_change(struct tcpcb *tp, int chg)
20076 /* Update HW tls state */
20077 struct tcp_rack *rack;
20079 rack = (struct tcp_rack *)tp->t_fb_ptr;
20081 rack->r_ctl.fsb.hw_tls = 1;
20083 rack->r_ctl.fsb.hw_tls = 0;
20087 rack_pru_options(struct tcpcb *tp, int flags)
20089 if (flags & PRUS_OOB)
20090 return (EOPNOTSUPP);
20094 static struct tcp_function_block __tcp_rack = {
20095 .tfb_tcp_block_name = __XSTRING(STACKNAME),
20096 .tfb_tcp_output = rack_output,
20097 .tfb_do_queued_segments = ctf_do_queued_segments,
20098 .tfb_do_segment_nounlock = rack_do_segment_nounlock,
20099 .tfb_tcp_do_segment = rack_do_segment,
20100 .tfb_tcp_ctloutput = rack_ctloutput,
20101 .tfb_tcp_fb_init = rack_init,
20102 .tfb_tcp_fb_fini = rack_fini,
20103 .tfb_tcp_timer_stop_all = rack_stopall,
20104 .tfb_tcp_rexmit_tmr = rack_remxt_tmr,
20105 .tfb_tcp_handoff_ok = rack_handoff_ok,
20106 .tfb_tcp_mtu_chg = rack_mtu_change,
20107 .tfb_pru_options = rack_pru_options,
20108 .tfb_hwtls_change = rack_hw_tls_change,
20109 .tfb_compute_pipe = rack_compute_pipe,
20110 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP,
20114 * rack_ctloutput() must drop the inpcb lock before performing copyin on
20115 * socket option arguments. When it re-acquires the lock after the copy, it
20116 * has to revalidate that the connection is still valid for the socket
20120 rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt)
20123 struct ip6_hdr *ip6;
20129 struct tcp_rack *rack;
20131 int32_t error = 0, optval;
20133 tp = intotcpcb(inp);
20134 rack = (struct tcp_rack *)tp->t_fb_ptr;
20135 if (rack == NULL) {
20140 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
20143 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
20146 switch (sopt->sopt_level) {
20149 MPASS(inp->inp_vflag & INP_IPV6PROTO);
20150 switch (sopt->sopt_name) {
20151 case IPV6_USE_MIN_MTU:
20152 tcp6_use_min_mtu(tp);
20156 * The DSCP codepoint has changed, update the fsb.
20158 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
20159 (rack->rc_inp->inp_flow & IPV6_FLOWINFO_MASK);
20167 switch (sopt->sopt_name) {
20170 * The DSCP codepoint has changed, update the fsb.
20172 ip->ip_tos = rack->rc_inp->inp_ip_tos;
20176 * The TTL has changed, update the fsb.
20178 ip->ip_ttl = rack->rc_inp->inp_ip_ttl;
20186 switch (sopt->sopt_name) {
20187 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */
20188 /* Pacing related ones */
20189 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */
20190 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */
20191 case TCP_BBR_IWINTSO: /* URL:tso_iwin */
20192 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */
20193 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */
20194 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */
20195 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/
20196 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */
20197 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */
20198 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */
20199 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */
20200 case TCP_RACK_RR_CONF: /* URL:rrr_conf */
20201 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */
20202 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */
20203 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */
20204 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */
20205 /* End pacing related */
20206 case TCP_FAST_RSM_HACK: /* URL:frsm_hack */
20207 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */
20208 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */
20209 case TCP_RACK_MIN_TO: /* URL:min_to */
20210 case TCP_RACK_EARLY_SEG: /* URL:early_seg */
20211 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */
20212 case TCP_RACK_REORD_FADE: /* URL:reord_fade */
20213 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */
20214 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */
20215 case TCP_RACK_TLP_USE: /* URL:tlp_use */
20216 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */
20217 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */
20218 case TCP_RACK_DO_DETECTION: /* URL:detect */
20219 case TCP_NO_PRR: /* URL:noprr */
20220 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */
20221 case TCP_DATA_AFTER_CLOSE: /* no URL */
20222 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */
20223 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */
20224 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */
20225 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */
20226 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */
20227 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */
20228 case TCP_RACK_PROFILE: /* URL:profile */
20229 case TCP_USE_CMP_ACKS: /* URL:cmpack */
20230 case TCP_RACK_ABC_VAL: /* URL:labc */
20231 case TCP_REC_ABC_VAL: /* URL:reclabc */
20232 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */
20233 case TCP_DEFER_OPTIONS: /* URL:defer */
20234 case TCP_RACK_DSACK_OPT: /* URL:dsack */
20235 case TCP_RACK_PACING_BETA: /* URL:pacing_beta */
20236 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */
20237 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */
20238 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */
20241 /* Filter off all unknown options to the base stack */
20242 return (tcp_default_ctloutput(inp, sopt));
20246 if (sopt->sopt_name == TCP_PACING_RATE_CAP) {
20247 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval));
20249 * We truncate it down to 32 bits for the socket-option trace; this
20250 * means rates > 34Gbps won't show right, but that's probably ok.
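 * (2^32 bytes/sec is roughly 34.36 Gbit/sec, hence the ~34Gbps limit
 * noted above.)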
20252 optval = (uint32_t)loptval;
20254 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
20255 /* Save it in 64 bit form too */
20261 if (inp->inp_flags & INP_DROPPED) {
20263 return (ECONNRESET);
20265 if (tp->t_fb != &__tcp_rack) {
20267 return (ENOPROTOOPT);
20269 if (rack->defer_options && (rack->gp_ready == 0) &&
20270 (sopt->sopt_name != TCP_DEFER_OPTIONS) &&
20271 (sopt->sopt_name != TCP_RACK_PACING_BETA) &&
20272 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) &&
20273 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) {
20274 /* Options are being deferred */
20275 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) {
20279 /* No memory to defer, fail */
20284 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval);
20290 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti)
20293 INP_WLOCK_ASSERT(tptoinpcb(tp));
20294 bzero(ti, sizeof(*ti));
20296 ti->tcpi_state = tp->t_state;
20297 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
20298 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
20299 if (tp->t_flags & TF_SACK_PERMIT)
20300 ti->tcpi_options |= TCPI_OPT_SACK;
20301 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
20302 ti->tcpi_options |= TCPI_OPT_WSCALE;
20303 ti->tcpi_snd_wscale = tp->snd_scale;
20304 ti->tcpi_rcv_wscale = tp->rcv_scale;
20306 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))
20307 ti->tcpi_options |= TCPI_OPT_ECN;
20308 if (tp->t_flags & TF_FASTOPEN)
20309 ti->tcpi_options |= TCPI_OPT_TFO;
20310 /* t_rcvtime is still kept in ticks */
20311 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
20312 /* Since we hold everything in precise useconds this is easy */
20313 ti->tcpi_rtt = tp->t_srtt;
20314 ti->tcpi_rttvar = tp->t_rttvar;
20315 ti->tcpi_rto = tp->t_rxtcur;
20316 ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
20317 ti->tcpi_snd_cwnd = tp->snd_cwnd;
20319 * FreeBSD-specific extension fields for tcp_info.
20321 ti->tcpi_rcv_space = tp->rcv_wnd;
20322 ti->tcpi_rcv_nxt = tp->rcv_nxt;
20323 ti->tcpi_snd_wnd = tp->snd_wnd;
20324 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. */
20325 ti->tcpi_snd_nxt = tp->snd_nxt;
20326 ti->tcpi_snd_mss = tp->t_maxseg;
20327 ti->tcpi_rcv_mss = tp->t_maxseg;
20328 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
20329 ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
20330 ti->tcpi_snd_zerowin = tp->t_sndzerowin;
20331 #ifdef NETFLIX_STATS
20332 ti->tcpi_total_tlp = tp->t_sndtlppack;
20333 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
20334 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
20337 if (tp->t_flags & TF_TOE) {
20338 ti->tcpi_options |= TCPI_OPT_TOE;
20339 tcp_offload_tcp_info(tp, ti);
20345 rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt)
20348 struct tcp_rack *rack;
20349 int32_t error, optval;
20350 uint64_t val, loptval;
20351 struct tcp_info ti;
20353 * Because all our options are either boolean or an int, we can just
20354 * pull everything into optval and then unlock and copy. If we ever
20355 * add an option that is not an int, then this will have quite an
20356 * impact on this routine.
20359 tp = intotcpcb(inp);
20360 rack = (struct tcp_rack *)tp->t_fb_ptr;
20361 if (rack == NULL) {
20365 switch (sopt->sopt_name) {
20367 /* First get the info filled */
20368 rack_fill_info(tp, &ti);
20369 /* Fix up the rtt related fields if needed */
20371 error = sooptcopyout(sopt, &ti, sizeof ti);
20374 * Beta is the congestion control value for NewReno that influences how
20375 * much of a backoff happens when loss is detected. It is normally set
20376 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value
20377 * when you exit recovery.
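 * For example, with a beta of 50, a cwnd of 100 segments at loss detection
 * leaves recovery with a cwnd of 50 segments.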
20379 case TCP_RACK_PACING_BETA:
20380 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
20382 else if (rack->rc_pacing_cc_set == 0)
20383 optval = rack->r_ctl.rc_saved_beta.beta;
20386 * Reach out into the CC data and report back what
20387 * I have previously set. Yeah it looks hackish but
20388 * we don't want to report the saved values.
20390 if (tp->t_ccv.cc_data)
20391 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta;
20397 * Beta_ecn is the congestion control value for NewReno that influences how
20398 * much of a backoff happens when an ECN mark is detected. It is normally set
20399 * to 80 for 80% i.e. the cwnd is reduced by 20% of its previous value when
20400 * you exit recovery. Note that classic ECN has a beta of 50; it is only
20401 * ABE ECN that uses this "less" value, but we do too with pacing :)
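 * For example, with a beta_ecn of 80, an ECN-marked round cuts a cwnd of
 * 100 segments to 80 segments rather than the classic 50.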
20404 case TCP_RACK_PACING_BETA_ECN:
20405 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
20407 else if (rack->rc_pacing_cc_set == 0)
20408 optval = rack->r_ctl.rc_saved_beta.beta_ecn;
20411 * Reach out into the CC data and report back what
20412 * I have previously set. Yeah it looks hackish but
20413 * we don't want to report the saved values.
20415 if (tp->t_ccv.cc_data)
20416 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn;
20421 case TCP_RACK_DSACK_OPT:
20423 if (rack->rc_rack_tmr_std_based) {
20426 if (rack->rc_rack_use_dsack) {
20430 case TCP_RACK_ENABLE_HYSTART:
20432 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) {
20433 optval = RACK_HYSTART_ON;
20434 if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND)
20435 optval = RACK_HYSTART_ON_W_SC;
20436 if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH)
20437 optval = RACK_HYSTART_ON_W_SC_C;
20439 optval = RACK_HYSTART_OFF;
20443 case TCP_FAST_RSM_HACK:
20444 optval = rack->fast_rsm_hack;
20446 case TCP_DEFER_OPTIONS:
20447 optval = rack->defer_options;
20449 case TCP_RACK_MEASURE_CNT:
20450 optval = rack->r_ctl.req_measurements;
20452 case TCP_REC_ABC_VAL:
20453 optval = rack->r_use_labc_for_rec;
20455 case TCP_RACK_ABC_VAL:
20456 optval = rack->rc_labc;
20458 case TCP_HDWR_UP_ONLY:
20459 optval= rack->r_up_only;
20461 case TCP_PACING_RATE_CAP:
20462 loptval = rack->r_ctl.bw_rate_cap;
20464 case TCP_RACK_PROFILE:
20465 /* You cannot retrieve a profile, it's write only */
20468 case TCP_USE_CMP_ACKS:
20469 optval = rack->r_use_cmp_ack;
20471 case TCP_RACK_PACE_TO_FILL:
20472 optval = rack->rc_pace_to_cwnd;
20473 if (optval && rack->r_fill_less_agg)
20476 case TCP_RACK_NO_PUSH_AT_MAX:
20477 optval = rack->r_ctl.rc_no_push_at_mrtt;
20479 case TCP_SHARED_CWND_ENABLE:
20480 optval = rack->rack_enable_scwnd;
20482 case TCP_RACK_NONRXT_CFG_RATE:
20483 optval = rack->rack_rec_nonrxt_use_cr;
20486 if (rack->rack_no_prr == 1)
20488 else if (rack->no_prr_addback == 1)
20493 case TCP_RACK_DO_DETECTION:
20494 optval = rack->do_detection;
20496 case TCP_RACK_MBUF_QUEUE:
20497 /* Now do we use the LRO mbuf-queue feature */
20498 optval = rack->r_mbuf_queue;
20500 case TCP_TIMELY_DYN_ADJ:
20501 optval = rack->rc_gp_dyn_mul;
20503 case TCP_BBR_IWINTSO:
20504 optval = rack->rc_init_win;
20506 case TCP_RACK_TLP_REDUCE:
20507 /* RACK TLP cwnd reduction (bool) */
20508 optval = rack->r_ctl.rc_tlp_cwnd_reduce;
20510 case TCP_BBR_RACK_INIT_RATE:
20511 val = rack->r_ctl.init_rate;
20512 /* convert to kbits per sec */
20515 optval = (uint32_t)val;
20517 case TCP_RACK_FORCE_MSEG:
20518 optval = rack->rc_force_max_seg;
20520 case TCP_RACK_PACE_MAX_SEG:
20521 /* Max segments in a pace */
20522 optval = rack->rc_user_set_max_segs;
20524 case TCP_RACK_PACE_ALWAYS:
20525 /* Use the always pace method */
20526 optval = rack->rc_always_pace;
20528 case TCP_RACK_PRR_SENDALOT:
20529 /* Allow PRR to send more than one seg */
20530 optval = rack->r_ctl.rc_prr_sendalot;
20532 case TCP_RACK_MIN_TO:
20533 /* Minimum time between rack t-o's in ms */
20534 optval = rack->r_ctl.rc_min_to;
20536 case TCP_RACK_EARLY_SEG:
20537 /* If early recovery max segments */
20538 optval = rack->r_ctl.rc_early_recovery_segs;
20540 case TCP_RACK_REORD_THRESH:
20541 /* RACK reorder threshold (shift amount) */
20542 optval = rack->r_ctl.rc_reorder_shift;
20544 case TCP_RACK_REORD_FADE:
20545 /* Does reordering fade after ms time */
20546 optval = rack->r_ctl.rc_reorder_fade;
20548 case TCP_BBR_USE_RACK_RR:
20549 /* Do we use the rack cheat for rxt */
20550 optval = rack->use_rack_rr;
20552 case TCP_RACK_RR_CONF:
20553 optval = rack->r_rr_config;
20555 case TCP_HDWR_RATE_CAP:
20556 optval = rack->r_rack_hw_rate_caps;
20558 case TCP_BBR_HDWR_PACE:
20559 optval = rack->rack_hdw_pace_ena;
20561 case TCP_RACK_TLP_THRESH:
20562 /* RACK TLP threshold i.e. srtt+(srtt/N) */
20563 optval = rack->r_ctl.rc_tlp_threshold;
20565 case TCP_RACK_PKT_DELAY:
20566 /* RACK added ms i.e. rack-rtt + reord + N */
20567 optval = rack->r_ctl.rc_pkt_delay;
20569 case TCP_RACK_TLP_USE:
20570 optval = rack->rack_tlp_threshold_use;
20572 case TCP_RACK_PACE_RATE_CA:
20573 optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
20575 case TCP_RACK_PACE_RATE_SS:
20576 optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
20578 case TCP_RACK_PACE_RATE_REC:
20579 optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
20581 case TCP_RACK_GP_INCREASE_SS:
20582 optval = rack->r_ctl.rack_per_of_gp_ss;
20584 case TCP_RACK_GP_INCREASE_CA:
20585 optval = rack->r_ctl.rack_per_of_gp_ca;
20587 case TCP_BBR_RACK_RTT_USE:
20588 optval = rack->r_ctl.rc_rate_sample_method;
20591 optval = tp->t_delayed_ack;
20593 case TCP_DATA_AFTER_CLOSE:
20594 optval = rack->rc_allow_data_af_clo;
20596 case TCP_SHARED_CWND_TIME_LIMIT:
20597 optval = rack->r_limit_scw;
20599 case TCP_RACK_TIMER_SLOP:
20600 optval = rack->r_ctl.timer_slop;
20603 return (tcp_default_ctloutput(inp, sopt));
20608 if (sopt->sopt_name == TCP_PACING_RATE_CAP)
20609 error = sooptcopyout(sopt, &loptval, sizeof loptval);
20611 error = sooptcopyout(sopt, &optval, sizeof optval);
20617 rack_ctloutput(struct inpcb *inp, struct sockopt *sopt)
20619 if (sopt->sopt_dir == SOPT_SET) {
20620 return (rack_set_sockopt(inp, sopt));
20621 } else if (sopt->sopt_dir == SOPT_GET) {
20622 return (rack_get_sockopt(inp, sopt));
20624 panic("%s: sopt_dir %d", __func__, sopt->sopt_dir);
20628 static const char *rack_stack_names[] = {
20629 __XSTRING(STACKNAME),
20631 __XSTRING(STACKALIAS),
20636 rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
20638 memset(mem, 0, size);
20643 rack_dtor(void *mem, int32_t size, void *arg)
20648 static bool rack_mod_inited = false;
20651 tcp_addrack(module_t mod, int32_t type, void *data)
20658 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
20659 sizeof(struct rack_sendmap),
20660 rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
20662 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
20663 sizeof(struct tcp_rack),
20664 rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
20666 sysctl_ctx_init(&rack_sysctl_ctx);
20667 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
20668 SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
20671 __XSTRING(STACKALIAS),
20673 __XSTRING(STACKNAME),
20675 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
20677 if (rack_sysctl_root == NULL) {
20678 printf("Failed to add sysctl node\n");
20682 rack_init_sysctls();
20683 num_stacks = nitems(rack_stack_names);
20684 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
20685 rack_stack_names, &num_stacks);
20687 printf("Failed to register %s stack name for "
20688 "%s module\n", rack_stack_names[num_stacks],
20689 __XSTRING(MODNAME));
20690 sysctl_ctx_free(&rack_sysctl_ctx);
20692 uma_zdestroy(rack_zone);
20693 uma_zdestroy(rack_pcb_zone);
20694 rack_counter_destroy();
20695 printf("Failed to register rack module -- err:%d\n", err);
20698 tcp_lro_reg_mbufq();
20699 rack_mod_inited = true;
20702 err = deregister_tcp_functions(&__tcp_rack, true, false);
20705 err = deregister_tcp_functions(&__tcp_rack, false, true);
20708 if (rack_mod_inited) {
20709 uma_zdestroy(rack_zone);
20710 uma_zdestroy(rack_pcb_zone);
20711 sysctl_ctx_free(&rack_sysctl_ctx);
20712 rack_counter_destroy();
20713 rack_mod_inited = false;
20715 tcp_lro_dereg_mbufq();
20719 return (EOPNOTSUPP);
20724 static moduledata_t tcp_rack = {
20725 .name = __XSTRING(MODNAME),
20726 .evhand = tcp_addrack,
20730 MODULE_VERSION(MODNAME, 1);
20731 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
20732 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
20734 #endif /* #if !defined(INET) && !defined(INET6) */