2 * Copyright (c) 2016-2020 Netflix, Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
31 #include "opt_inet6.h"
32 #include "opt_ipsec.h"
33 #include "opt_ratelimit.h"
34 #include "opt_kern_tls.h"
35 #if defined(INET) || defined(INET6)
36 #include <sys/param.h>
38 #include <sys/module.h>
39 #include <sys/kernel.h>
41 #include <sys/hhook.h>
44 #include <sys/malloc.h>
46 #include <sys/mutex.h>
48 #include <sys/proc.h> /* for proc0 declaration */
49 #include <sys/socket.h>
50 #include <sys/socketvar.h>
51 #include <sys/sysctl.h>
52 #include <sys/systm.h>
54 #include <sys/qmath.h>
56 #include <sys/stats.h> /* Must come after qmath.h and tree.h */
60 #include <sys/refcount.h>
61 #include <sys/queue.h>
62 #include <sys/tim_filter.h>
64 #include <sys/kthread.h>
65 #include <sys/kern_prefetch.h>
66 #include <sys/protosw.h>
68 #include <sys/sched.h>
69 #include <machine/cpu.h>
73 #include <net/route.h>
74 #include <net/route/nhop.h>
77 #define TCPSTATES /* for logging */
79 #include <netinet/in.h>
80 #include <netinet/in_kdtrace.h>
81 #include <netinet/in_pcb.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
84 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
85 #include <netinet/ip_var.h>
86 #include <netinet/ip6.h>
87 #include <netinet6/in6_pcb.h>
88 #include <netinet6/ip6_var.h>
89 #include <netinet/tcp.h>
91 #include <netinet/tcp_fsm.h>
92 #include <netinet/tcp_seq.h>
93 #include <netinet/tcp_timer.h>
94 #include <netinet/tcp_var.h>
95 #include <netinet/tcp_log_buf.h>
96 #include <netinet/tcp_syncache.h>
97 #include <netinet/tcp_hpts.h>
98 #include <netinet/tcp_ratelimit.h>
99 #include <netinet/tcp_accounting.h>
100 #include <netinet/tcpip.h>
101 #include <netinet/cc/cc.h>
102 #include <netinet/cc/cc_newreno.h>
103 #include <netinet/tcp_fastopen.h>
104 #include <netinet/tcp_lro.h>
105 #ifdef NETFLIX_SHARED_CWND
106 #include <netinet/tcp_shared_cwnd.h>
109 #include <netinet/tcp_offload.h>
112 #include <netinet6/tcp6_var.h>
114 #include <netinet/tcp_ecn.h>
116 #include <netipsec/ipsec_support.h>
118 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
119 #include <netipsec/ipsec.h>
120 #include <netipsec/ipsec6.h>
123 #include <netinet/udp.h>
124 #include <netinet/udp_var.h>
125 #include <machine/in_cksum.h>
128 #include <security/mac/mac_framework.h>
130 #include "sack_filter.h"
131 #include "tcp_rack.h"
132 #include "tailq_hash.h"
133 #include "rack_bbr_common.h"
135 uma_zone_t rack_zone;
136 uma_zone_t rack_pcb_zone;
139 #define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t)))
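/*
 * Illustrative note (not from the original source): TICKS2SBT() just scales
 * a tick count by tick_sbt, the sbintime_t length of one clock tick.  For
 * example, assuming hz = 1000 (so one tick is 1 ms and tick_sbt is roughly
 * SBT_1MS), a hypothetical use such as
 *
 *	sbintime_t sbt = TICKS2SBT(40);	// ~40 ms expressed as sbintime_t
 *
 * yields the 64-bit fixed-point value expected by the sbintime-based APIs.
 */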
142 VNET_DECLARE(uint32_t, newreno_beta);
143 VNET_DECLARE(uint32_t, newreno_beta_ecn);
144 #define V_newreno_beta VNET(newreno_beta)
145 #define V_newreno_beta_ecn VNET(newreno_beta_ecn)
148 MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block");
149 MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options");
151 struct sysctl_ctx_list rack_sysctl_ctx;
152 struct sysctl_oid *rack_sysctl_root;
158 * The RACK module incorporates a number of
159 * TCP ideas that have been put out into the IETF
160 * over the last few years:
161 * - Matt Mathis's Rate Halving which slowly drops
162 * the congestion window so that the ack clock can
163 * be maintained during a recovery.
164 * - Yuchung Cheng's RACK TCP (for which it is named) that
165 * will stop us from using the number of dup acks and instead
166 * use time as the gauge of when we retransmit.
167 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
168 * of Dukkipati et al.
169 * RACK depends on SACK, so if an endpoint arrives that
170 * cannot do SACK the state machine below will shuttle the
171 * connection back to using the "default" TCP stack that is
174 * To implement RACK the original TCP stack was first decomposed
175 * into a functional state machine with individual states
176 * for each of the possible TCP connection states. The do_segment
177 * function's role in life is to mandate that the connection supports SACK
178 * initially and then assure that the RACK state matches the connection
179 * state before calling the state's do_segment function. Each
180 * state is simplified due to the fact that the original do_segment
181 * has been decomposed and we *know* what state we are in (no
182 * switches on the state) and all tests for SACK are gone. This
183 * greatly simplifies what each state does.
185 * TCP output is also over-written with a new version since it
186 * must maintain the new rack scoreboard.
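/*
 * Rough sketch (illustrative only, not the actual dispatch code) of the
 * decomposition described above: once SACK support has been mandated the
 * top-level handler just calls the handler for the current state, roughly
 *
 *	if ((tp->t_flags & TF_SACK_PERMIT) == 0)
 *		... hand the connection back to the "default" stack ...
 *	else if (tp->t_state == TCPS_ESTABLISHED)
 *		rack_do_established(m, th, so, tp, &to, drop_hdrlen, tlen,
 *		    tiwin, thflags, nxt_pkt, iptos);
 *	else if (tp->t_state == TCPS_FIN_WAIT_1)
 *		rack_do_fin_wait_1(m, th, so, tp, &to, drop_hdrlen, tlen,
 *		    tiwin, thflags, nxt_pkt, iptos);
 *	... and so on, one branch per TCP state, with no switch and no SACK
 *	tests inside the individual handlers.
 */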
189 static int32_t rack_tlp_thresh = 1;
190 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */
191 static int32_t rack_tlp_use_greater = 1;
192 static int32_t rack_reorder_thresh = 2;
193 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000 usec (60 seconds) */
195 static uint32_t rack_clamp_ss_upper = 110;
196 static uint32_t rack_clamp_ca_upper = 105;
197 static uint32_t rack_rxt_min_rnds = 10; /* Min rounds if drastic rxt clamp is in place */
198 static uint32_t rack_unclamp_round_thresh = 100; /* number of perfect rounds before we unclamp */
199 static uint32_t rack_unclamp_rxt_thresh = 5; /* .5% and under */
200 static uint64_t rack_rxt_clamp_thresh = 0; /* Do we do the rxt clamp thing */
201 static int32_t rack_dnd_default = 0; /* For rr_conf = 3, what is the default for dnd */
202 static int32_t rack_rxt_controls = 0;
203 static int32_t rack_fill_cw_state = 0;
204 static uint8_t rack_req_measurements = 1;
205 /* Attack threshold detections */
206 static uint32_t rack_highest_sack_thresh_seen = 0;
207 static uint32_t rack_highest_move_thresh_seen = 0;
208 static uint32_t rack_merge_out_sacks_on_attack = 0;
209 static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */
210 static int32_t rack_hw_pace_extra_slots = 0; /* 2 extra MSS time betweens */
211 static int32_t rack_hw_rate_caps = 0; /* 1; */
212 static int32_t rack_hw_rate_cap_per = 0; /* 0 -- off */
213 static int32_t rack_hw_rate_min = 0; /* 1500000;*/
214 static int32_t rack_hw_rate_to_low = 0; /* 1200000; */
215 static int32_t rack_hw_up_only = 0;
216 static int32_t rack_stats_gets_ms_rtt = 1;
217 static int32_t rack_prr_addbackmax = 2;
218 static int32_t rack_do_hystart = 0;
219 static int32_t rack_apply_rtt_with_reduced_conf = 0;
220 static int32_t rack_hibeta_setting = 0;
221 static int32_t rack_default_pacing_divisor = 250;
222 static int32_t rack_uses_full_dgp_in_rec = 1;
223 static uint16_t rack_pacing_min_seg = 0;
226 static uint32_t sad_seg_size_per = 800; /* 80.0 % */
227 static int32_t rack_pkt_delay = 1000;
228 static int32_t rack_send_a_lot_in_prr = 1;
229 static int32_t rack_min_to = 1000; /* Minimum timeout in microseconds */
230 static int32_t rack_verbose_logging = 0;
231 static int32_t rack_ignore_data_after_close = 1;
232 static int32_t rack_enable_shared_cwnd = 1;
233 static int32_t rack_use_cmp_acks = 1;
234 static int32_t rack_use_fsb = 1;
235 static int32_t rack_use_rfo = 1;
236 static int32_t rack_use_rsm_rfo = 1;
237 static int32_t rack_max_abc_post_recovery = 2;
238 static int32_t rack_client_low_buf = 0;
239 static int32_t rack_dsack_std_based = 0x3; /* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */
240 static int32_t rack_bw_multipler = 2; /* Limit on fill cw's jump up to be this x gp_est */
241 #ifdef TCP_ACCOUNTING
242 static int32_t rack_tcp_accounting = 0;
244 static int32_t rack_limits_scwnd = 1;
245 static int32_t rack_enable_mqueue_for_nonpaced = 0;
246 static int32_t rack_hybrid_allow_set_maxseg = 0;
247 static int32_t rack_disable_prr = 0;
248 static int32_t use_rack_rr = 1;
249 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */
250 static int32_t rack_persist_min = 250000; /* 250ms in usecs */
251 static int32_t rack_persist_max = 2000000; /* 2 Second in usec's */
252 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */
253 static int32_t rack_default_init_window = 0; /* Use system default */
254 static int32_t rack_limit_time_with_srtt = 0;
255 static int32_t rack_autosndbuf_inc = 20; /* In percentage form */
256 static int32_t rack_enobuf_hw_boost_mult = 0; /* How many times the hw rate we boost slot using time_between */
257 static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */
258 static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */
259 static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */
260 static int32_t rack_hw_check_queue = 0; /* Do we always pre-check queue depth of a hw queue */
261 static int32_t rack_full_buffer_discount = 10;
263 * Currently regular TCP has an rto_min of 30ms;
264 * the backoff goes 12 times, so that ends up
265 * being a total of 122.850 seconds before a
266 * connection is killed.
268 static uint32_t rack_def_data_window = 20;
269 static uint32_t rack_goal_bdp = 2;
270 static uint32_t rack_min_srtts = 1;
271 static uint32_t rack_min_measure_usec = 0;
272 static int32_t rack_tlp_min = 10000; /* 10ms */
273 static int32_t rack_rto_min = 30000; /* 30,000 usec same as main freebsd */
274 static int32_t rack_rto_max = 4000000; /* 4 seconds in usec's */
275 static const int32_t rack_free_cache = 2;
276 static int32_t rack_hptsi_segments = 40;
277 static int32_t rack_rate_sample_method = USE_RTT_LOW;
278 static int32_t rack_pace_every_seg = 0;
279 static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */
280 static int32_t rack_slot_reduction = 4;
281 static int32_t rack_wma_divisor = 8; /* For WMA calculation */
282 static int32_t rack_cwnd_block_ends_measure = 0;
283 static int32_t rack_rwnd_block_ends_measure = 0;
284 static int32_t rack_def_profile = 0;
286 static int32_t rack_lower_cwnd_at_tlp = 0;
287 static int32_t rack_limited_retran = 0;
288 static int32_t rack_always_send_oldest = 0;
289 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;
291 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */
292 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */
293 static uint16_t rack_per_of_gp_rec = 200; /* 200 % of bw */
296 static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */
297 static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */
298 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */
299 static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */
300 static uint16_t rack_atexit_prtt = 130; /* Clamp to 130% on exit prtt if non highly buffered path */
302 static uint32_t rack_max_drain_wait = 2; /* How many gp srtt's before we give up draining */
303 static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */
304 static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */
305 static uint32_t rack_probertt_use_min_rtt_exit = 0;
306 static uint32_t rack_probe_rtt_sets_cwnd = 0;
307 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */
308 static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in usecs */
309 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top fraction */
310 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bottom fraction */
311 static uint32_t rack_min_probertt_hold = 40000; /* Equal to delayed ack time */
312 static uint32_t rack_probertt_filter_life = 10000000;
313 static uint32_t rack_probertt_lower_within = 10;
314 static uint32_t rack_min_rtt_movement = 250000; /* Must move at least 250ms (in microseconds) to count as a lowering */
315 static int32_t rack_pace_one_seg = 0; /* Shall we pace for less than 1.4Meg 1MSS at a time */
316 static int32_t rack_probertt_clear_is = 1;
317 static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */
318 static int32_t rack_hbp_thresh = 3; /* what is the divisor max_rtt/min_rtt to decide a hbp */
321 static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */
323 /* Timely information */
324 /* Combine these two gives the range of 'no change' to bw */
325 /* ie the up/down provide the upper and lower bound */
326 static int32_t rack_gp_per_bw_mul_up = 2; /* 2% */
327 static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */
328 static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */
329 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */
330 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */
331 static int32_t rack_gp_decrease_per = 20; /* 20% decrease in multiplier */
332 static int32_t rack_gp_increase_per = 2; /* 2% increase in multiplier */
333 static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */
334 static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */
335 static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */
336 static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */
337 static int32_t rack_gp_no_rec_chg = 1; /* Prohibit recovery from reducing its multiplier */
338 static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? */
339 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */
340 static int32_t rack_timely_max_push_drop = 3; /* Three rounds of pushing */
341 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */
342 static int32_t rack_use_max_for_nobackoff = 0;
343 static int32_t rack_timely_int_timely_only = 0; /* do interim timely's only use the timely algo (no b/w changes)? */
344 static int32_t rack_timely_no_stopping = 0;
345 static int32_t rack_down_raise_thresh = 100;
346 static int32_t rack_req_segs = 1;
347 static uint64_t rack_bw_rate_cap = 0;
350 /* Rack specific counters */
351 counter_u64_t rack_saw_enobuf;
352 counter_u64_t rack_saw_enobuf_hw;
353 counter_u64_t rack_saw_enetunreach;
354 counter_u64_t rack_persists_sends;
355 counter_u64_t rack_persists_acks;
356 counter_u64_t rack_persists_loss;
357 counter_u64_t rack_persists_lost_ends;
358 counter_u64_t rack_total_bytes;
360 counter_u64_t rack_adjust_map_bw;
362 /* Tail loss probe counters */
363 counter_u64_t rack_tlp_tot;
364 counter_u64_t rack_tlp_newdata;
365 counter_u64_t rack_tlp_retran;
366 counter_u64_t rack_tlp_retran_bytes;
367 counter_u64_t rack_to_tot;
368 counter_u64_t rack_hot_alloc;
369 counter_u64_t rack_to_alloc;
370 counter_u64_t rack_to_alloc_hard;
371 counter_u64_t rack_to_alloc_emerg;
372 counter_u64_t rack_to_alloc_limited;
373 counter_u64_t rack_alloc_limited_conns;
374 counter_u64_t rack_split_limited;
375 counter_u64_t rack_rxt_clamps_cwnd;
376 counter_u64_t rack_rxt_clamps_cwnd_uniq;
378 counter_u64_t rack_multi_single_eq;
379 counter_u64_t rack_proc_non_comp_ack;
381 counter_u64_t rack_fto_send;
382 counter_u64_t rack_fto_rsm_send;
383 counter_u64_t rack_nfto_resend;
384 counter_u64_t rack_non_fto_send;
385 counter_u64_t rack_extended_rfo;
387 counter_u64_t rack_sack_proc_all;
388 counter_u64_t rack_sack_proc_short;
389 counter_u64_t rack_sack_proc_restart;
390 counter_u64_t rack_sack_attacks_detected;
391 counter_u64_t rack_sack_attacks_reversed;
392 counter_u64_t rack_sack_attacks_suspect;
393 counter_u64_t rack_sack_used_next_merge;
394 counter_u64_t rack_sack_splits;
395 counter_u64_t rack_sack_used_prev_merge;
396 counter_u64_t rack_sack_skipped_acked;
397 counter_u64_t rack_ack_total;
398 counter_u64_t rack_express_sack;
399 counter_u64_t rack_sack_total;
400 counter_u64_t rack_move_none;
401 counter_u64_t rack_move_some;
403 counter_u64_t rack_input_idle_reduces;
404 counter_u64_t rack_collapsed_win;
405 counter_u64_t rack_collapsed_win_seen;
406 counter_u64_t rack_collapsed_win_rxt;
407 counter_u64_t rack_collapsed_win_rxt_bytes;
408 counter_u64_t rack_try_scwnd;
409 counter_u64_t rack_hw_pace_init_fail;
410 counter_u64_t rack_hw_pace_lost;
412 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
413 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];
416 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))
418 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \
419 (tv) = (value) + slop; \
420 if ((u_long)(tv) < (u_long)(tvmin)) \
421 (tv) = (tvmin); \
422 if ((u_long)(tv) > (u_long)(tvmax)) \
423 (tv) = (tvmax); \
424 } while (0)
430 rack_process_ack(struct mbuf *m, struct tcphdr *th,
431 struct socket *so, struct tcpcb *tp, struct tcpopt *to,
432 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val);
434 rack_process_data(struct mbuf *m, struct tcphdr *th,
435 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
436 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
438 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
439 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery);
440 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
441 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
443 static struct rack_sendmap *
444 rack_check_recovery_mode(struct tcpcb *tp,
447 rack_cong_signal(struct tcpcb *tp,
448 uint32_t type, uint32_t ack, int );
449 static void rack_counter_destroy(void);
451 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt);
452 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
454 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override);
456 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
457 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos);
458 static void rack_dtor(void *mem, int32_t size, void *arg);
460 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
461 uint32_t flex1, uint32_t flex2,
462 uint32_t flex3, uint32_t flex4,
463 uint32_t flex5, uint32_t flex6,
464 uint16_t flex7, uint8_t mod);
467 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
468 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line,
469 struct rack_sendmap *rsm, uint8_t quality);
470 static struct rack_sendmap *
471 rack_find_high_nonack(struct tcp_rack *rack,
472 struct rack_sendmap *rsm);
473 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
474 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
475 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
476 static int rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt);
478 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
479 tcp_seq th_ack, int line, uint8_t quality);
481 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm);
484 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
485 static int32_t rack_handoff_ok(struct tcpcb *tp);
486 static int32_t rack_init(struct tcpcb *tp, void **ptr);
487 static void rack_init_sysctls(void);
490 rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
491 struct tcphdr *th, int entered_rec, int dup_ack_struck,
492 int *dsack_seen, int *sacks_seen);
494 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
495 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts,
496 struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls, int segsiz);
498 static uint64_t rack_get_gp_est(struct tcp_rack *rack);
501 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
502 struct rack_sendmap *rsm);
503 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
504 static int32_t rack_output(struct tcpcb *tp);
507 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
508 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
509 uint32_t cts, int *no_extra, int *moved_two, uint32_t segsiz);
510 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq);
511 static void rack_remxt_tmr(struct tcpcb *tp);
512 static int rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt);
513 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
514 static int32_t rack_stopall(struct tcpcb *tp);
515 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
517 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
518 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint16_t add_flag, int segsiz);
520 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
521 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag, int segsiz);
523 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
524 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
525 static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
527 rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
528 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
529 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
531 rack_do_closing(struct mbuf *m, struct tcphdr *th,
532 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
533 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
535 rack_do_established(struct mbuf *m, struct tcphdr *th,
536 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
537 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
539 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
540 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
541 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos);
543 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
544 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
545 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
547 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
548 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
549 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
551 rack_do_lastack(struct mbuf *m, struct tcphdr *th,
552 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
553 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
555 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
556 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
557 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
559 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
560 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
561 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
562 static void rack_chk_http_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts);
563 struct rack_sendmap *
564 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
566 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
567 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
569 tcp_rack_partialack(struct tcpcb *tp);
571 rack_set_profile(struct tcp_rack *rack, int prof);
573 rack_apply_deferred_options(struct tcp_rack *rack);
575 int32_t rack_clear_counter=0;
578 rack_get_lt_bw(struct tcp_rack *rack)
583 tim = rack->r_ctl.lt_bw_time;
584 bytes = rack->r_ctl.lt_bw_bytes;
585 if (rack->lt_bw_up) {
586 /* Include all the current bytes too */
588 bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq);
589 tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark);
591 if ((bytes != 0) && (tim != 0))
592 return ((bytes * (uint64_t)1000000) / tim);
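/*
 * Worked example (illustrative numbers only): with lt_bw_bytes accumulating
 * to 1,500,000 bytes over an lt_bw_time of 500,000 usec, the expression above
 * returns (1500000 * 1000000) / 500000 = 3,000,000, i.e. the long-term
 * bandwidth is reported in bytes per second.
 */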
598 rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8)
601 struct cc_newreno_opts opt;
604 int error, failed = 0;
607 if (tp->t_cc == NULL) {
611 rack->rc_pacing_cc_set = 1;
612 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
613 /* Not new-reno we can't play games with beta! */
618 if (CC_ALGO(tp)->ctl_output == NULL) {
619 /* Huh, not using new-reno so no swaps? */
623 /* Get the current values out */
624 sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
625 sopt.sopt_dir = SOPT_GET;
626 opt.name = CC_NEWRENO_BETA;
627 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
633 opt.name = CC_NEWRENO_BETA_ECN;
634 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
639 old.beta_ecn = opt.val;
641 /* Now let's set in the values we have stored */
642 sopt.sopt_dir = SOPT_SET;
643 opt.name = CC_NEWRENO_BETA;
644 opt.val = rack->r_ctl.rc_saved_beta.beta;
645 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
650 opt.name = CC_NEWRENO_BETA_ECN;
651 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn;
652 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
657 /* Save off the values for restoral */
658 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
660 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
661 union tcp_log_stackspecific log;
665 ptr = ((struct newreno *)tp->t_ccv.cc_data);
666 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
667 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
668 log.u_bbr.flex1 = ptr->beta;
669 log.u_bbr.flex2 = ptr->beta_ecn;
670 log.u_bbr.flex3 = ptr->newreno_flags;
671 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
672 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
673 log.u_bbr.flex6 = failed;
674 log.u_bbr.flex7 = rack->gp_ready;
675 log.u_bbr.flex7 <<= 1;
676 log.u_bbr.flex7 |= rack->use_fixed_rate;
677 log.u_bbr.flex7 <<= 1;
678 log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
679 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
680 log.u_bbr.flex8 = flex8;
681 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, error,
682 0, &log, false, NULL, NULL, 0, &tv);
687 rack_set_cc_pacing(struct tcp_rack *rack)
689 if (rack->rc_pacing_cc_set)
692 * Use the swap utility, placing 3 in flex8 to identify
693 * the setting of a new set of values.
695 rack->rc_pacing_cc_set = 1;
696 rack_swap_beta_values(rack, 3);
700 rack_undo_cc_pacing(struct tcp_rack *rack)
702 if (rack->rc_pacing_cc_set == 0)
705 * Use the swap utility, placing 4 in flex8 to identify
706 * a restoral of the old values.
708 rack->rc_pacing_cc_set = 0;
709 rack_swap_beta_values(rack, 4);
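/*
 * Usage sketch (illustrative): the two helpers above are intended to be
 * used as a pair around pacing, e.g.
 *
 *	rack_set_cc_pacing(rack);	// swap in the saved beta/beta_ecn (flex8 = 3)
 *	...				// run with the pacing-specific new-reno betas
 *	rack_undo_cc_pacing(rack);	// swap the original values back (flex8 = 4)
 *
 * rc_pacing_cc_set guards against doing either operation twice in a row.
 */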
713 rack_log_gpset(struct tcp_rack *rack, uint32_t seq_end, uint32_t ack_end_t,
714 uint32_t send_end_t, int line, uint8_t mode, struct rack_sendmap *rsm)
716 if (tcp_bblogging_on(rack->rc_tp)) {
717 union tcp_log_stackspecific log;
720 memset(&log, 0, sizeof(log));
721 log.u_bbr.flex1 = seq_end;
722 log.u_bbr.flex2 = rack->rc_tp->gput_seq;
723 log.u_bbr.flex3 = ack_end_t;
724 log.u_bbr.flex4 = rack->rc_tp->gput_ts;
725 log.u_bbr.flex5 = send_end_t;
726 log.u_bbr.flex6 = rack->rc_tp->gput_ack;
727 log.u_bbr.flex7 = mode;
728 log.u_bbr.flex8 = 69;
729 log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts;
730 log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts;
731 log.u_bbr.pkts_out = line;
732 log.u_bbr.cwnd_gain = rack->app_limited_needs_set;
733 log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt;
735 log.u_bbr.applimited = rsm->r_start;
736 log.u_bbr.delivered = rsm->r_end;
737 log.u_bbr.epoch = rsm->r_flags;
739 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
740 TCP_LOG_EVENTP(rack->rc_tp, NULL,
741 &rack->rc_inp->inp_socket->so_rcv,
742 &rack->rc_inp->inp_socket->so_snd,
743 BBR_LOG_HPTSI_CALC, 0,
744 0, &log, false, &tv);
749 sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
754 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
755 if (error || req->newptr == NULL)
758 error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
763 printf("Clearing RACK counters\n");
765 counter_u64_zero(rack_tlp_tot);
766 counter_u64_zero(rack_tlp_newdata);
767 counter_u64_zero(rack_tlp_retran);
768 counter_u64_zero(rack_tlp_retran_bytes);
769 counter_u64_zero(rack_to_tot);
770 counter_u64_zero(rack_saw_enobuf);
771 counter_u64_zero(rack_saw_enobuf_hw);
772 counter_u64_zero(rack_saw_enetunreach);
773 counter_u64_zero(rack_persists_sends);
774 counter_u64_zero(rack_total_bytes);
775 counter_u64_zero(rack_persists_acks);
776 counter_u64_zero(rack_persists_loss);
777 counter_u64_zero(rack_persists_lost_ends);
779 counter_u64_zero(rack_adjust_map_bw);
781 counter_u64_zero(rack_to_alloc_hard);
782 counter_u64_zero(rack_to_alloc_emerg);
783 counter_u64_zero(rack_sack_proc_all);
784 counter_u64_zero(rack_fto_send);
785 counter_u64_zero(rack_fto_rsm_send);
786 counter_u64_zero(rack_extended_rfo);
787 counter_u64_zero(rack_hw_pace_init_fail);
788 counter_u64_zero(rack_hw_pace_lost);
789 counter_u64_zero(rack_non_fto_send);
790 counter_u64_zero(rack_nfto_resend);
791 counter_u64_zero(rack_sack_proc_short);
792 counter_u64_zero(rack_sack_proc_restart);
793 counter_u64_zero(rack_to_alloc);
794 counter_u64_zero(rack_to_alloc_limited);
795 counter_u64_zero(rack_alloc_limited_conns);
796 counter_u64_zero(rack_split_limited);
797 counter_u64_zero(rack_rxt_clamps_cwnd);
798 counter_u64_zero(rack_rxt_clamps_cwnd_uniq);
799 counter_u64_zero(rack_multi_single_eq);
800 counter_u64_zero(rack_proc_non_comp_ack);
801 counter_u64_zero(rack_sack_attacks_detected);
802 counter_u64_zero(rack_sack_attacks_reversed);
803 counter_u64_zero(rack_sack_attacks_suspect);
804 counter_u64_zero(rack_sack_used_next_merge);
805 counter_u64_zero(rack_sack_used_prev_merge);
806 counter_u64_zero(rack_sack_splits);
807 counter_u64_zero(rack_sack_skipped_acked);
808 counter_u64_zero(rack_ack_total);
809 counter_u64_zero(rack_express_sack);
810 counter_u64_zero(rack_sack_total);
811 counter_u64_zero(rack_move_none);
812 counter_u64_zero(rack_move_some);
813 counter_u64_zero(rack_try_scwnd);
814 counter_u64_zero(rack_collapsed_win);
815 counter_u64_zero(rack_collapsed_win_rxt);
816 counter_u64_zero(rack_collapsed_win_seen);
817 counter_u64_zero(rack_collapsed_win_rxt_bytes);
818 } else if (stat == 2) {
820 printf("Clearing RACK option array\n");
822 COUNTER_ARRAY_ZERO(rack_opts_arry, RACK_OPTS_SIZE);
823 } else if (stat == 3) {
824 printf("Rack has no stats counters to clear (use 1 to clear all stats in sysctl node)\n");
825 } else if (stat == 4) {
827 printf("Clearing RACK out size array\n");
829 COUNTER_ARRAY_ZERO(rack_out_size, TCP_MSS_ACCT_SIZE);
831 rack_clear_counter = 0;
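/*
 * Note (added summary of the handler above): the value written selects what
 * to clear -- 1 zeroes the individual rack counters, 2 zeroes the option-usage
 * array (rack_opts_arry), 3 only prints a hint, and 4 zeroes the out-size
 * array (rack_out_size); the backing variable is then reset to 0.
 */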
836 rack_init_sysctls(void)
838 struct sysctl_oid *rack_counters;
839 struct sysctl_oid *rack_attack;
840 struct sysctl_oid *rack_pacing;
841 struct sysctl_oid *rack_timely;
842 struct sysctl_oid *rack_timers;
843 struct sysctl_oid *rack_tlp;
844 struct sysctl_oid *rack_misc;
845 struct sysctl_oid *rack_features;
846 struct sysctl_oid *rack_measure;
847 struct sysctl_oid *rack_probertt;
848 struct sysctl_oid *rack_hw_pacing;
850 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
851 SYSCTL_CHILDREN(rack_sysctl_root),
854 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
855 "Rack Sack Attack Counters and Controls");
856 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
857 SYSCTL_CHILDREN(rack_sysctl_root),
860 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
862 SYSCTL_ADD_S32(&rack_sysctl_ctx,
863 SYSCTL_CHILDREN(rack_sysctl_root),
864 OID_AUTO, "rate_sample_method", CTLFLAG_RW,
865 &rack_rate_sample_method , USE_RTT_LOW,
866 "What method should we use for rate sampling 0=high, 1=low ");
867 /* Probe rtt related controls */
868 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
869 SYSCTL_CHILDREN(rack_sysctl_root),
872 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
873 "ProbeRTT related Controls");
874 SYSCTL_ADD_U16(&rack_sysctl_ctx,
875 SYSCTL_CHILDREN(rack_probertt),
876 OID_AUTO, "exit_per_hpb", CTLFLAG_RW,
877 &rack_atexit_prtt_hbp, 130,
878 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%");
879 SYSCTL_ADD_U16(&rack_sysctl_ctx,
880 SYSCTL_CHILDREN(rack_probertt),
881 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW,
882 &rack_atexit_prtt, 130,
883 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%");
884 SYSCTL_ADD_U16(&rack_sysctl_ctx,
885 SYSCTL_CHILDREN(rack_probertt),
886 OID_AUTO, "gp_per_mul", CTLFLAG_RW,
887 &rack_per_of_gp_probertt, 60,
888 "What percentage of goodput do we pace at in probertt");
889 SYSCTL_ADD_U16(&rack_sysctl_ctx,
890 SYSCTL_CHILDREN(rack_probertt),
891 OID_AUTO, "gp_per_reduce", CTLFLAG_RW,
892 &rack_per_of_gp_probertt_reduce, 10,
893 "What percentage of goodput do we reduce every gp_srtt");
894 SYSCTL_ADD_U16(&rack_sysctl_ctx,
895 SYSCTL_CHILDREN(rack_probertt),
896 OID_AUTO, "gp_per_low", CTLFLAG_RW,
897 &rack_per_of_gp_lowthresh, 40,
898 "What percentage of goodput do we allow the multiplier to fall to");
899 SYSCTL_ADD_U32(&rack_sysctl_ctx,
900 SYSCTL_CHILDREN(rack_probertt),
901 OID_AUTO, "time_between", CTLFLAG_RW,
902 &rack_time_between_probertt, 96000000,
903 "How many useconds must pass after the lowest rtt falls before we enter probertt");
904 SYSCTL_ADD_U32(&rack_sysctl_ctx,
905 SYSCTL_CHILDREN(rack_probertt),
906 OID_AUTO, "safety", CTLFLAG_RW,
907 &rack_probe_rtt_safety_val, 2000000,
908 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)");
909 SYSCTL_ADD_U32(&rack_sysctl_ctx,
910 SYSCTL_CHILDREN(rack_probertt),
911 OID_AUTO, "sets_cwnd", CTLFLAG_RW,
912 &rack_probe_rtt_sets_cwnd, 0,
913 "Do we set the cwnd too (if always_lower is on)");
914 SYSCTL_ADD_U32(&rack_sysctl_ctx,
915 SYSCTL_CHILDREN(rack_probertt),
916 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW,
917 &rack_max_drain_wait, 2,
918 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal");
919 SYSCTL_ADD_U32(&rack_sysctl_ctx,
920 SYSCTL_CHILDREN(rack_probertt),
921 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW,
923 "We must drain this many gp_srtt's waiting for flight to reach goal");
924 SYSCTL_ADD_U32(&rack_sysctl_ctx,
925 SYSCTL_CHILDREN(rack_probertt),
926 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW,
927 &rack_probertt_use_min_rtt_entry, 1,
928 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry");
929 SYSCTL_ADD_U32(&rack_sysctl_ctx,
930 SYSCTL_CHILDREN(rack_probertt),
931 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW,
932 &rack_probertt_use_min_rtt_exit, 0,
933 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt");
934 SYSCTL_ADD_U32(&rack_sysctl_ctx,
935 SYSCTL_CHILDREN(rack_probertt),
936 OID_AUTO, "length_div", CTLFLAG_RW,
937 &rack_probertt_gpsrtt_cnt_div, 0,
938 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)");
939 SYSCTL_ADD_U32(&rack_sysctl_ctx,
940 SYSCTL_CHILDREN(rack_probertt),
941 OID_AUTO, "length_mul", CTLFLAG_RW,
942 &rack_probertt_gpsrtt_cnt_mul, 0,
943 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)");
944 SYSCTL_ADD_U32(&rack_sysctl_ctx,
945 SYSCTL_CHILDREN(rack_probertt),
946 OID_AUTO, "holdtim_at_target", CTLFLAG_RW,
947 &rack_min_probertt_hold, 200000,
948 "What is the minimum time we hold probertt at target");
949 SYSCTL_ADD_U32(&rack_sysctl_ctx,
950 SYSCTL_CHILDREN(rack_probertt),
951 OID_AUTO, "filter_life", CTLFLAG_RW,
952 &rack_probertt_filter_life, 10000000,
953 "What is the time for the filters life in useconds");
954 SYSCTL_ADD_U32(&rack_sysctl_ctx,
955 SYSCTL_CHILDREN(rack_probertt),
956 OID_AUTO, "lower_within", CTLFLAG_RW,
957 &rack_probertt_lower_within, 10,
958 "If the rtt goes lower within this percentage of the time, go into probe-rtt");
959 SYSCTL_ADD_U32(&rack_sysctl_ctx,
960 SYSCTL_CHILDREN(rack_probertt),
961 OID_AUTO, "must_move", CTLFLAG_RW,
962 &rack_min_rtt_movement, 250,
963 "How much is the minimum movement in rtt to count as a drop for probertt purposes");
964 SYSCTL_ADD_U32(&rack_sysctl_ctx,
965 SYSCTL_CHILDREN(rack_probertt),
966 OID_AUTO, "clear_is_cnts", CTLFLAG_RW,
967 &rack_probertt_clear_is, 1,
968 "Do we clear I/S counts on exiting probe-rtt");
969 SYSCTL_ADD_S32(&rack_sysctl_ctx,
970 SYSCTL_CHILDREN(rack_probertt),
971 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW,
972 &rack_max_drain_hbp, 1,
973 "How many extra drain gpsrtt's do we get in highly buffered paths");
974 SYSCTL_ADD_S32(&rack_sysctl_ctx,
975 SYSCTL_CHILDREN(rack_probertt),
976 OID_AUTO, "hbp_threshold", CTLFLAG_RW,
978 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold");
979 /* Pacing related sysctls */
980 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
981 SYSCTL_CHILDREN(rack_sysctl_root),
984 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
985 "Pacing related Controls");
986 SYSCTL_ADD_S32(&rack_sysctl_ctx,
987 SYSCTL_CHILDREN(rack_pacing),
988 OID_AUTO, "fulldgpinrec", CTLFLAG_RW,
989 &rack_uses_full_dgp_in_rec, 1,
990 "Do we use all DGP features in recovery (fillcw, timely et.al.)?");
991 SYSCTL_ADD_S32(&rack_sysctl_ctx,
992 SYSCTL_CHILDREN(rack_pacing),
993 OID_AUTO, "fullbufdisc", CTLFLAG_RW,
994 &rack_full_buffer_discount, 10,
995 "What percentage b/w reduction over the GP estimate for a full buffer (default=0 off)?");
996 SYSCTL_ADD_S32(&rack_sysctl_ctx,
997 SYSCTL_CHILDREN(rack_pacing),
998 OID_AUTO, "fillcw", CTLFLAG_RW,
999 &rack_fill_cw_state, 0,
1000 "Enable fillcw on new connections (default=0 off)?");
1001 SYSCTL_ADD_U16(&rack_sysctl_ctx,
1002 SYSCTL_CHILDREN(rack_pacing),
1003 OID_AUTO, "min_burst", CTLFLAG_RW,
1004 &rack_pacing_min_seg, 0,
1005 "What is the min burst size for pacing (0 disables)?");
1006 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1007 SYSCTL_CHILDREN(rack_pacing),
1008 OID_AUTO, "divisor", CTLFLAG_RW,
1009 &rack_default_pacing_divisor, 4,
1010 "What is the default divisor given to the rl code?");
1011 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1012 SYSCTL_CHILDREN(rack_pacing),
1013 OID_AUTO, "fillcw_max_mult", CTLFLAG_RW,
1014 &rack_bw_multipler, 2,
1015 "What is the multiplier of the current gp_est that fillcw can increase the b/w too?");
1016 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1017 SYSCTL_CHILDREN(rack_pacing),
1018 OID_AUTO, "max_pace_over", CTLFLAG_RW,
1019 &rack_max_per_above, 30,
1020 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)");
1021 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1022 SYSCTL_CHILDREN(rack_pacing),
1023 OID_AUTO, "allow1mss", CTLFLAG_RW,
1024 &rack_pace_one_seg, 0,
1025 "Do we allow low b/w pacing of 1MSS instead of two (1.2Meg and less)?");
1026 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1027 SYSCTL_CHILDREN(rack_pacing),
1028 OID_AUTO, "limit_wsrtt", CTLFLAG_RW,
1029 &rack_limit_time_with_srtt, 0,
1030 "Do we limit pacing time based on srtt");
1031 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1032 SYSCTL_CHILDREN(rack_pacing),
1033 OID_AUTO, "init_win", CTLFLAG_RW,
1034 &rack_default_init_window, 0,
1035 "Do we have a rack initial window 0 = system default");
1036 SYSCTL_ADD_U16(&rack_sysctl_ctx,
1037 SYSCTL_CHILDREN(rack_pacing),
1038 OID_AUTO, "gp_per_ss", CTLFLAG_RW,
1039 &rack_per_of_gp_ss, 250,
1040 "If non zero, what percentage of goodput to pace at in slow start");
1041 SYSCTL_ADD_U16(&rack_sysctl_ctx,
1042 SYSCTL_CHILDREN(rack_pacing),
1043 OID_AUTO, "gp_per_ca", CTLFLAG_RW,
1044 &rack_per_of_gp_ca, 150,
1045 "If non zero, what percentage of goodput to pace at in congestion avoidance");
1046 SYSCTL_ADD_U16(&rack_sysctl_ctx,
1047 SYSCTL_CHILDREN(rack_pacing),
1048 OID_AUTO, "gp_per_rec", CTLFLAG_RW,
1049 &rack_per_of_gp_rec, 200,
1050 "If non zero, what percentage of goodput to pace at in recovery");
1051 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1052 SYSCTL_CHILDREN(rack_pacing),
1053 OID_AUTO, "pace_max_seg", CTLFLAG_RW,
1054 &rack_hptsi_segments, 40,
1055 "What size is the max for TSO segments in pacing and burst mitigation");
1056 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1057 SYSCTL_CHILDREN(rack_pacing),
1058 OID_AUTO, "burst_reduces", CTLFLAG_RW,
1059 &rack_slot_reduction, 4,
1060 "When doing only burst mitigation what is the reduce divisor");
1061 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1062 SYSCTL_CHILDREN(rack_sysctl_root),
1063 OID_AUTO, "use_pacing", CTLFLAG_RW,
1064 &rack_pace_every_seg, 0,
1065 "If set we use pacing, if clear we use only the original burst mitigation");
1066 SYSCTL_ADD_U64(&rack_sysctl_ctx,
1067 SYSCTL_CHILDREN(rack_pacing),
1068 OID_AUTO, "rate_cap", CTLFLAG_RW,
1069 &rack_bw_rate_cap, 0,
1070 "If set we apply this value to the absolute rate cap used by pacing");
1071 SYSCTL_ADD_U8(&rack_sysctl_ctx,
1072 SYSCTL_CHILDREN(rack_sysctl_root),
1073 OID_AUTO, "req_measure_cnt", CTLFLAG_RW,
1074 &rack_req_measurements, 1,
1075 "If doing dynamic pacing, how many measurements must be in before we start pacing?");
1076 /* Hardware pacing */
1077 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1078 SYSCTL_CHILDREN(rack_sysctl_root),
1081 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1082 "Pacing related Controls");
1083 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1084 SYSCTL_CHILDREN(rack_hw_pacing),
1085 OID_AUTO, "rwnd_factor", CTLFLAG_RW,
1086 &rack_hw_rwnd_factor, 2,
1087 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?");
1088 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1089 SYSCTL_CHILDREN(rack_hw_pacing),
1090 OID_AUTO, "precheck", CTLFLAG_RW,
1091 &rack_hw_check_queue, 0,
1092 "Do we always precheck the hdwr pacing queue to avoid ENOBUF's?");
1093 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1094 SYSCTL_CHILDREN(rack_hw_pacing),
1095 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW,
1096 &rack_enobuf_hw_boost_mult, 0,
1097 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?");
1098 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1099 SYSCTL_CHILDREN(rack_hw_pacing),
1100 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW,
1101 &rack_enobuf_hw_max, 2,
1102 "What is the max boost the pacing time if we see a ENOBUFS?");
1103 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1104 SYSCTL_CHILDREN(rack_hw_pacing),
1105 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW,
1106 &rack_enobuf_hw_min, 2,
1107 "What is the min boost the pacing time if we see a ENOBUFS?");
1108 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1109 SYSCTL_CHILDREN(rack_hw_pacing),
1110 OID_AUTO, "enable", CTLFLAG_RW,
1111 &rack_enable_hw_pacing, 0,
1112 "Should RACK attempt to use hw pacing?");
1113 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1114 SYSCTL_CHILDREN(rack_hw_pacing),
1115 OID_AUTO, "rate_cap", CTLFLAG_RW,
1116 &rack_hw_rate_caps, 0,
1117 "Does the highest hardware pacing rate cap the rate we will send at??");
1118 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1119 SYSCTL_CHILDREN(rack_hw_pacing),
1120 OID_AUTO, "uncap_per", CTLFLAG_RW,
1121 &rack_hw_rate_cap_per, 0,
1122 "If you go over b/w by this amount you will be uncapped (0 = never)");
1123 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1124 SYSCTL_CHILDREN(rack_hw_pacing),
1125 OID_AUTO, "rate_min", CTLFLAG_RW,
1126 &rack_hw_rate_min, 0,
1127 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?");
1128 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1129 SYSCTL_CHILDREN(rack_hw_pacing),
1130 OID_AUTO, "rate_to_low", CTLFLAG_RW,
1131 &rack_hw_rate_to_low, 0,
1132 "If we fall below this rate, dis-engage hw pacing?");
1133 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1134 SYSCTL_CHILDREN(rack_hw_pacing),
1135 OID_AUTO, "up_only", CTLFLAG_RW,
1136 &rack_hw_up_only, 0,
1137 "Do we allow hw pacing to lower the rate selected?");
1138 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1139 SYSCTL_CHILDREN(rack_hw_pacing),
1140 OID_AUTO, "extra_mss_precise", CTLFLAG_RW,
1141 &rack_hw_pace_extra_slots, 0,
1142 "If the rates between software and hardware match precisely how many extra time_betweens do we get?");
1143 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1144 SYSCTL_CHILDREN(rack_sysctl_root),
1147 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1148 "Rack Timely RTT Controls");
1149 /* Timely based GP dynamics */
1150 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1151 SYSCTL_CHILDREN(rack_timely),
1152 OID_AUTO, "upper", CTLFLAG_RW,
1153 &rack_gp_per_bw_mul_up, 2,
1154 "Rack timely upper range for equal b/w (in percentage)");
1155 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1156 SYSCTL_CHILDREN(rack_timely),
1157 OID_AUTO, "lower", CTLFLAG_RW,
1158 &rack_gp_per_bw_mul_down, 4,
1159 "Rack timely lower range for equal b/w (in percentage)");
1160 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1161 SYSCTL_CHILDREN(rack_timely),
1162 OID_AUTO, "rtt_max_mul", CTLFLAG_RW,
1163 &rack_gp_rtt_maxmul, 3,
1164 "Rack timely multiplier of lowest rtt for rtt_max");
1165 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1166 SYSCTL_CHILDREN(rack_timely),
1167 OID_AUTO, "rtt_min_div", CTLFLAG_RW,
1168 &rack_gp_rtt_mindiv, 4,
1169 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt");
1170 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1171 SYSCTL_CHILDREN(rack_timely),
1172 OID_AUTO, "rtt_min_mul", CTLFLAG_RW,
1173 &rack_gp_rtt_minmul, 1,
1174 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
1175 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1176 SYSCTL_CHILDREN(rack_timely),
1177 OID_AUTO, "decrease", CTLFLAG_RW,
1178 &rack_gp_decrease_per, 20,
1179 "Rack timely decrease percentage of our GP multiplication factor");
1180 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1181 SYSCTL_CHILDREN(rack_timely),
1182 OID_AUTO, "increase", CTLFLAG_RW,
1183 &rack_gp_increase_per, 2,
1184 "Rack timely increase perentage of our GP multiplication factor");
1185 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1186 SYSCTL_CHILDREN(rack_timely),
1187 OID_AUTO, "lowerbound", CTLFLAG_RW,
1188 &rack_per_lower_bound, 50,
1189 "Rack timely lowest percentage we allow GP multiplier to fall to");
1190 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1191 SYSCTL_CHILDREN(rack_timely),
1192 OID_AUTO, "upperboundss", CTLFLAG_RW,
1193 &rack_per_upper_bound_ss, 0,
1194 "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)");
1195 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1196 SYSCTL_CHILDREN(rack_timely),
1197 OID_AUTO, "upperboundca", CTLFLAG_RW,
1198 &rack_per_upper_bound_ca, 0,
1199 "Rack timely highest percentage we allow GP multiplier to CA raise to (0 is no upperbound)");
1200 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1201 SYSCTL_CHILDREN(rack_timely),
1202 OID_AUTO, "dynamicgp", CTLFLAG_RW,
1203 &rack_do_dyn_mul, 0,
1204 "Rack timely do we enable dynmaic timely goodput by default");
1205 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1206 SYSCTL_CHILDREN(rack_timely),
1207 OID_AUTO, "no_rec_red", CTLFLAG_RW,
1208 &rack_gp_no_rec_chg, 1,
1209 "Rack timely do we prohibit the recovery multiplier from being lowered");
1210 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1211 SYSCTL_CHILDREN(rack_timely),
1212 OID_AUTO, "red_clear_cnt", CTLFLAG_RW,
1213 &rack_timely_dec_clear, 6,
1214 "Rack timely what threshold do we count to before another boost during b/w decent");
1215 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1216 SYSCTL_CHILDREN(rack_timely),
1217 OID_AUTO, "max_push_rise", CTLFLAG_RW,
1218 &rack_timely_max_push_rise, 3,
1219 "Rack timely how many times do we push up with b/w increase");
1220 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1221 SYSCTL_CHILDREN(rack_timely),
1222 OID_AUTO, "max_push_drop", CTLFLAG_RW,
1223 &rack_timely_max_push_drop, 3,
1224 "Rack timely how many times do we push back on b/w decent");
1225 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1226 SYSCTL_CHILDREN(rack_timely),
1227 OID_AUTO, "min_segs", CTLFLAG_RW,
1228 &rack_timely_min_segs, 4,
1229 "Rack timely when setting the cwnd what is the min num segments");
1230 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1231 SYSCTL_CHILDREN(rack_timely),
1232 OID_AUTO, "noback_max", CTLFLAG_RW,
1233 &rack_use_max_for_nobackoff, 0,
1234 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min");
1235 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1236 SYSCTL_CHILDREN(rack_timely),
1237 OID_AUTO, "interim_timely_only", CTLFLAG_RW,
1238 &rack_timely_int_timely_only, 0,
1239 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)");
1240 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1241 SYSCTL_CHILDREN(rack_timely),
1242 OID_AUTO, "nonstop", CTLFLAG_RW,
1243 &rack_timely_no_stopping, 0,
1244 "Rack timely don't stop increase");
1245 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1246 SYSCTL_CHILDREN(rack_timely),
1247 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW,
1248 &rack_down_raise_thresh, 100,
1249 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)");
1250 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1251 SYSCTL_CHILDREN(rack_timely),
1252 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW,
1254 "Bottom dragging if not these many segments outstanding and room");
1256 /* TLP and Rack related parameters */
1257 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1258 SYSCTL_CHILDREN(rack_sysctl_root),
1261 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1262 "TLP and Rack related Controls");
1263 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1264 SYSCTL_CHILDREN(rack_tlp),
1265 OID_AUTO, "use_rrr", CTLFLAG_RW,
1267 "Do we use Rack Rapid Recovery");
1268 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1269 SYSCTL_CHILDREN(rack_tlp),
1270 OID_AUTO, "post_rec_labc", CTLFLAG_RW,
1271 &rack_max_abc_post_recovery, 2,
1272 "Since we do early recovery, do we override the l_abc to a value, if so what?");
1273 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1274 SYSCTL_CHILDREN(rack_tlp),
1275 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW,
1276 &rack_non_rxt_use_cr, 0,
1277 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk");
1278 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1279 SYSCTL_CHILDREN(rack_tlp),
1280 OID_AUTO, "tlpmethod", CTLFLAG_RW,
1281 &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
1282 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
1283 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1284 SYSCTL_CHILDREN(rack_tlp),
1285 OID_AUTO, "limit", CTLFLAG_RW,
1287 "How many TLP's can be sent without sending new data");
1288 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1289 SYSCTL_CHILDREN(rack_tlp),
1290 OID_AUTO, "use_greater", CTLFLAG_RW,
1291 &rack_tlp_use_greater, 1,
1292 "Should we use the rack_rtt time if its greater than srtt");
1293 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1294 SYSCTL_CHILDREN(rack_tlp),
1295 OID_AUTO, "tlpminto", CTLFLAG_RW,
1296 &rack_tlp_min, 10000,
1297 "TLP minimum timeout per the specification (in microseconds)");
1298 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1299 SYSCTL_CHILDREN(rack_tlp),
1300 OID_AUTO, "send_oldest", CTLFLAG_RW,
1301 &rack_always_send_oldest, 0,
1302 "Should we always send the oldest TLP and RACK-TLP");
1303 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1304 SYSCTL_CHILDREN(rack_tlp),
1305 OID_AUTO, "rack_tlimit", CTLFLAG_RW,
1306 &rack_limited_retran, 0,
1307 "How many times can a rack timeout drive out sends");
1308 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1309 SYSCTL_CHILDREN(rack_tlp),
1310 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
1311 &rack_lower_cwnd_at_tlp, 0,
1312 "When a TLP completes a retran should we enter recovery");
1313 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1314 SYSCTL_CHILDREN(rack_tlp),
1315 OID_AUTO, "reorder_thresh", CTLFLAG_RW,
1316 &rack_reorder_thresh, 2,
1317 "What factor for rack will be added when seeing reordering (shift right)");
1318 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1319 SYSCTL_CHILDREN(rack_tlp),
1320 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
1321 &rack_tlp_thresh, 1,
1322 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
1323 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1324 SYSCTL_CHILDREN(rack_tlp),
1325 OID_AUTO, "reorder_fade", CTLFLAG_RW,
1326 &rack_reorder_fade, 60000000,
1327 "Does reorder detection fade, if so how many microseconds (0 means never)");
1328 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1329 SYSCTL_CHILDREN(rack_tlp),
1330 OID_AUTO, "pktdelay", CTLFLAG_RW,
1331 &rack_pkt_delay, 1000,
1332 "Extra RACK time (in microseconds) besides reordering thresh");
1334 /* Timer related controls */
1335 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1336 SYSCTL_CHILDREN(rack_sysctl_root),
1339 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1340 "Timer related controls");
1341 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1342 SYSCTL_CHILDREN(rack_timers),
1343 OID_AUTO, "persmin", CTLFLAG_RW,
1344 &rack_persist_min, 250000,
1345 "What is the minimum time in microseconds between persists");
1346 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1347 SYSCTL_CHILDREN(rack_timers),
1348 OID_AUTO, "persmax", CTLFLAG_RW,
1349 &rack_persist_max, 2000000,
1350 "What is the largest delay in microseconds between persists");
1351 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1352 SYSCTL_CHILDREN(rack_timers),
1353 OID_AUTO, "delayed_ack", CTLFLAG_RW,
1354 &rack_delayed_ack_time, 40000,
1355 "Delayed ack time (40ms in microseconds)");
1356 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1357 SYSCTL_CHILDREN(rack_timers),
1358 OID_AUTO, "minrto", CTLFLAG_RW,
1359 &rack_rto_min, 30000,
1360 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP");
1361 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1362 SYSCTL_CHILDREN(rack_timers),
1363 OID_AUTO, "maxrto", CTLFLAG_RW,
1364 &rack_rto_max, 4000000,
1365 "Maximum RTO in microseconds -- should be at least as large as min_rto");
1366 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1367 SYSCTL_CHILDREN(rack_timers),
1368 OID_AUTO, "minto", CTLFLAG_RW,
1370 "Minimum rack timeout in microseconds");
1371 /* Measure controls */
1372 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1373 SYSCTL_CHILDREN(rack_sysctl_root),
1376 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1377 "Measure related controls");
1378 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1379 SYSCTL_CHILDREN(rack_measure),
1380 OID_AUTO, "wma_divisor", CTLFLAG_RW,
1381 &rack_wma_divisor, 8,
1382 "When doing b/w calculation what is the divisor for the WMA");
1383 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1384 SYSCTL_CHILDREN(rack_measure),
1385 OID_AUTO, "end_cwnd", CTLFLAG_RW,
1386 &rack_cwnd_block_ends_measure, 0,
1387 "Does a cwnd just-return end the measurement window (app limited)");
1388 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1389 SYSCTL_CHILDREN(rack_measure),
1390 OID_AUTO, "end_rwnd", CTLFLAG_RW,
1391 &rack_rwnd_block_ends_measure, 0,
1392 "Does an rwnd just-return end the measurement window (app limited -- not persists)");
1393 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1394 SYSCTL_CHILDREN(rack_measure),
1395 OID_AUTO, "min_target", CTLFLAG_RW,
1396 &rack_def_data_window, 20,
1397 "What is the minimum target window (in mss) for a GP measurements");
1398 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1399 SYSCTL_CHILDREN(rack_measure),
1400 OID_AUTO, "goal_bdp", CTLFLAG_RW,
1402 "What is the goal BDP to measure");
1403 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1404 SYSCTL_CHILDREN(rack_measure),
1405 OID_AUTO, "min_srtts", CTLFLAG_RW,
1407 "What is the goal BDP to measure");
1408 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1409 SYSCTL_CHILDREN(rack_measure),
1410 OID_AUTO, "min_measure_tim", CTLFLAG_RW,
1411 &rack_min_measure_usec, 0,
1412 "What is the Minimum time time for a measurement if 0, this is off");
1414 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1415 SYSCTL_CHILDREN(rack_sysctl_root),
1418 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1419 "Feature controls");
1420 SYSCTL_ADD_U64(&rack_sysctl_ctx,
1421 SYSCTL_CHILDREN(rack_features),
1422 OID_AUTO, "rxt_clamp_thresh", CTLFLAG_RW,
1423 &rack_rxt_clamp_thresh, 0,
1424 "Bit encoded clamping setup bits CCCC CCCCC UUUU UULF PPPP PPPP PPPP PPPP");
1425 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1426 SYSCTL_CHILDREN(rack_features),
1427 OID_AUTO, "hybrid_set_maxseg", CTLFLAG_RW,
1428 &rack_hybrid_allow_set_maxseg, 0,
1429 "Should hybrid pacing allow the setmss command");
1430 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1431 SYSCTL_CHILDREN(rack_features),
1432 OID_AUTO, "cmpack", CTLFLAG_RW,
1433 &rack_use_cmp_acks, 1,
1434 "Should RACK have LRO send compressed acks");
1435 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1436 SYSCTL_CHILDREN(rack_features),
1437 OID_AUTO, "fsb", CTLFLAG_RW,
1439 "Should RACK use the fast send block?");
1440 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1441 SYSCTL_CHILDREN(rack_features),
1442 OID_AUTO, "rfo", CTLFLAG_RW,
1444 "Should RACK use rack_fast_output()?");
1445 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1446 SYSCTL_CHILDREN(rack_features),
1447 OID_AUTO, "rsmrfo", CTLFLAG_RW,
1448 &rack_use_rsm_rfo, 1,
1449 "Should RACK use rack_fast_rsm_output()?");
1450 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1451 SYSCTL_CHILDREN(rack_features),
1452 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW,
1453 &rack_enable_mqueue_for_nonpaced, 0,
1454 "Should RACK use mbuf queuing for non-paced connections");
1455 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1456 SYSCTL_CHILDREN(rack_features),
1457 OID_AUTO, "hystartplusplus", CTLFLAG_RW,
1458 &rack_do_hystart, 0,
1459 "Should RACK enable HyStart++ on connections?");
1460 /* Misc rack controls */
1461 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1462 SYSCTL_CHILDREN(rack_sysctl_root),
1465 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1466 "Misc related controls");
1467 #ifdef TCP_ACCOUNTING
1468 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1469 SYSCTL_CHILDREN(rack_misc),
1470 OID_AUTO, "tcp_acct", CTLFLAG_RW,
1471 &rack_tcp_accounting, 0,
1472 "Should we turn on TCP accounting for all rack sessions?");
1474 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1475 SYSCTL_CHILDREN(rack_misc),
1476 OID_AUTO, "dnd", CTLFLAG_RW,
1477 &rack_dnd_default, 0,
1478 "Do not disturb default for rack_rrr = 3");
1479 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1480 SYSCTL_CHILDREN(rack_misc),
1481 OID_AUTO, "sad_seg_per", CTLFLAG_RW,
1482 &sad_seg_size_per, 800,
1483 "Percentage of segment size needed in a sack 800 = 80.0?");
1484 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1485 SYSCTL_CHILDREN(rack_misc),
1486 OID_AUTO, "rxt_controls", CTLFLAG_RW,
1487 &rack_rxt_controls, 0,
1488 "Retransmit sending size controls (valid values 0, 1, 2 default=1)?");
1489 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1490 SYSCTL_CHILDREN(rack_misc),
1491 OID_AUTO, "rack_hibeta", CTLFLAG_RW,
1492 &rack_hibeta_setting, 0,
1493 "Do we ue a high beta (80 instead of 50)?");
1494 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1495 SYSCTL_CHILDREN(rack_misc),
1496 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW,
1497 &rack_apply_rtt_with_reduced_conf, 0,
1498 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?");
1499 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1500 SYSCTL_CHILDREN(rack_misc),
1501 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW,
1502 &rack_dsack_std_based, 3,
1503 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?");
1504 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1505 SYSCTL_CHILDREN(rack_misc),
1506 OID_AUTO, "prr_addback_max", CTLFLAG_RW,
1507 &rack_prr_addbackmax, 2,
1508 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?");
1509 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1510 SYSCTL_CHILDREN(rack_misc),
1511 OID_AUTO, "stats_gets_ms", CTLFLAG_RW,
1512 &rack_stats_gets_ms_rtt, 1,
1513 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?");
1514 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1515 SYSCTL_CHILDREN(rack_misc),
1516 OID_AUTO, "clientlowbuf", CTLFLAG_RW,
1517 &rack_client_low_buf, 0,
1518 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?");
1519 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1520 SYSCTL_CHILDREN(rack_misc),
1521 OID_AUTO, "defprofile", CTLFLAG_RW,
1522 &rack_def_profile, 0,
1523 "Should RACK use a default profile (0=no, num == profile num)?");
1524 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1525 SYSCTL_CHILDREN(rack_misc),
1526 OID_AUTO, "shared_cwnd", CTLFLAG_RW,
1527 &rack_enable_shared_cwnd, 1,
1528 "Should RACK try to use the shared cwnd on connections where allowed");
1529 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1530 SYSCTL_CHILDREN(rack_misc),
1531 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW,
1532 &rack_limits_scwnd, 1,
1533 "Should RACK place low end time limits on the shared cwnd feature");
1534 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1535 SYSCTL_CHILDREN(rack_misc),
1536 OID_AUTO, "no_prr", CTLFLAG_RW,
1537 &rack_disable_prr, 0,
1538 "Should RACK not use prr and only pace (must have pacing on)");
1539 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1540 SYSCTL_CHILDREN(rack_misc),
1541 OID_AUTO, "bb_verbose", CTLFLAG_RW,
1542 &rack_verbose_logging, 0,
1543 "Should RACK black box logging be verbose");
1544 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1545 SYSCTL_CHILDREN(rack_misc),
1546 OID_AUTO, "data_after_close", CTLFLAG_RW,
1547 &rack_ignore_data_after_close, 1,
1548 "Do we hold off sending a RST until all pending data is ack'd");
1549 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1550 SYSCTL_CHILDREN(rack_misc),
1551 OID_AUTO, "no_sack_needed", CTLFLAG_RW,
1552 &rack_sack_not_required, 1,
1553 "Do we allow rack to run on connections not supporting SACK");
1554 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1555 SYSCTL_CHILDREN(rack_misc),
1556 OID_AUTO, "prr_sendalot", CTLFLAG_RW,
1557 &rack_send_a_lot_in_prr, 1,
1558 "Send a lot in prr");
1559 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1560 SYSCTL_CHILDREN(rack_misc),
1561 OID_AUTO, "autoscale", CTLFLAG_RW,
1562 &rack_autosndbuf_inc, 20,
1563 "What percentage should rack scale up its snd buffer by?");
1564 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1565 SYSCTL_CHILDREN(rack_misc),
1566 OID_AUTO, "rnds_for_rxt_clamp", CTLFLAG_RW,
1567 &rack_rxt_min_rnds, 10,
1568 "Number of rounds needed between RTT clamps due to high loss rates");
1569 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1570 SYSCTL_CHILDREN(rack_misc),
1571 OID_AUTO, "rnds_for_unclamp", CTLFLAG_RW,
1572 &rack_unclamp_round_thresh, 100,
1573 "Number of rounds needed with no loss to unclamp");
1574 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1575 SYSCTL_CHILDREN(rack_misc),
1576 OID_AUTO, "rxt_threshs_for_unclamp", CTLFLAG_RW,
1577 &rack_unclamp_rxt_thresh, 5,
1578 "Percentage of retransmits we need to be under to unclamp (5 = .5 percent)\n");
1579 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1580 SYSCTL_CHILDREN(rack_misc),
1581 OID_AUTO, "clamp_ss_upper", CTLFLAG_RW,
1582 &rack_clamp_ss_upper, 110,
1583 "Clamp percentage ceiling in SS?");
1584 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1585 SYSCTL_CHILDREN(rack_misc),
1586 OID_AUTO, "clamp_ca_upper", CTLFLAG_RW,
1587 &rack_clamp_ca_upper, 110,
1588 "Clamp percentage ceiling in CA?");
1589 /* Sack Attacker detection stuff */
1590 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1591 SYSCTL_CHILDREN(rack_attack),
1592 OID_AUTO, "merge_out", CTLFLAG_RW,
1593 &rack_merge_out_sacks_on_attack, 0,
1594 "Do we merge the sendmap when we decide we are being attacked?");
1596 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1597 SYSCTL_CHILDREN(rack_attack),
1598 OID_AUTO, "detect_highsackratio", CTLFLAG_RW,
1599 &rack_highest_sack_thresh_seen, 0,
1600 "Highest sack to ack ratio seen");
1601 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1602 SYSCTL_CHILDREN(rack_attack),
1603 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW,
1604 &rack_highest_move_thresh_seen, 0,
1605 "Highest move to non-move ratio seen");
1606 rack_ack_total = counter_u64_alloc(M_WAITOK);
1607 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1608 SYSCTL_CHILDREN(rack_attack),
1609 OID_AUTO, "acktotal", CTLFLAG_RD,
1611 "Total number of Ack's");
1612 rack_express_sack = counter_u64_alloc(M_WAITOK);
1613 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1614 SYSCTL_CHILDREN(rack_attack),
1615 OID_AUTO, "exp_sacktotal", CTLFLAG_RD,
1617 "Total expresss number of Sack's");
1618 rack_sack_total = counter_u64_alloc(M_WAITOK);
1619 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1620 SYSCTL_CHILDREN(rack_attack),
1621 OID_AUTO, "sacktotal", CTLFLAG_RD,
1623 "Total number of SACKs");
1624 rack_move_none = counter_u64_alloc(M_WAITOK);
1625 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1626 SYSCTL_CHILDREN(rack_attack),
1627 OID_AUTO, "move_none", CTLFLAG_RD,
1629 "Total number of SACK index reuse of positions under threshold");
1630 rack_move_some = counter_u64_alloc(M_WAITOK);
1631 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1632 SYSCTL_CHILDREN(rack_attack),
1633 OID_AUTO, "move_some", CTLFLAG_RD,
1635 "Total number of SACK index reuse of positions over threshold");
1636 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK);
1637 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1638 SYSCTL_CHILDREN(rack_attack),
1639 OID_AUTO, "attacks", CTLFLAG_RD,
1640 &rack_sack_attacks_detected,
1641 "Total number of SACK attackers that had sack disabled");
1642 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK);
1643 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1644 SYSCTL_CHILDREN(rack_attack),
1645 OID_AUTO, "reversed", CTLFLAG_RD,
1646 &rack_sack_attacks_reversed,
1647 "Total number of SACK attackers that were later determined false positive");
1648 rack_sack_attacks_suspect = counter_u64_alloc(M_WAITOK);
1649 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1650 SYSCTL_CHILDREN(rack_attack),
1651 OID_AUTO, "suspect", CTLFLAG_RD,
1652 &rack_sack_attacks_suspect,
1653 "Total number of SACKs that triggered early detection");
1655 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK);
1656 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1657 SYSCTL_CHILDREN(rack_attack),
1658 OID_AUTO, "nextmerge", CTLFLAG_RD,
1659 &rack_sack_used_next_merge,
1660 "Total number of times we used the next merge");
1661 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK);
1662 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1663 SYSCTL_CHILDREN(rack_attack),
1664 OID_AUTO, "prevmerge", CTLFLAG_RD,
1665 &rack_sack_used_prev_merge,
1666 "Total number of times we used the prev merge");
1668 rack_total_bytes = counter_u64_alloc(M_WAITOK);
1669 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1670 SYSCTL_CHILDREN(rack_counters),
1671 OID_AUTO, "totalbytes", CTLFLAG_RD,
1673 "Total number of bytes sent");
1674 rack_fto_send = counter_u64_alloc(M_WAITOK);
1675 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1676 SYSCTL_CHILDREN(rack_counters),
1677 OID_AUTO, "fto_send", CTLFLAG_RD,
1678 &rack_fto_send, "Total number of rack_fast_output sends");
1679 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK);
1680 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1681 SYSCTL_CHILDREN(rack_counters),
1682 OID_AUTO, "fto_rsm_send", CTLFLAG_RD,
1683 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends");
1684 rack_nfto_resend = counter_u64_alloc(M_WAITOK);
1685 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1686 SYSCTL_CHILDREN(rack_counters),
1687 OID_AUTO, "nfto_resend", CTLFLAG_RD,
1688 &rack_nfto_resend, "Total number of rack_output retransmissions");
1689 rack_non_fto_send = counter_u64_alloc(M_WAITOK);
1690 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1691 SYSCTL_CHILDREN(rack_counters),
1692 OID_AUTO, "nfto_send", CTLFLAG_RD,
1693 &rack_non_fto_send, "Total number of rack_output first sends");
1694 rack_extended_rfo = counter_u64_alloc(M_WAITOK);
1695 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1696 SYSCTL_CHILDREN(rack_counters),
1697 OID_AUTO, "rfo_extended", CTLFLAG_RD,
1698 &rack_extended_rfo, "Total number of times we extended rfo");
1700 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK);
1701 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1702 SYSCTL_CHILDREN(rack_counters),
1703 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD,
1704 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing");
1705 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK);
1707 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1708 SYSCTL_CHILDREN(rack_counters),
1709 OID_AUTO, "hwpace_lost", CTLFLAG_RD,
1710 &rack_hw_pace_lost, "Total number of times we failed to initialize hw pacing");
1711 rack_tlp_tot = counter_u64_alloc(M_WAITOK);
1712 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1713 SYSCTL_CHILDREN(rack_counters),
1714 OID_AUTO, "tlp_to_total", CTLFLAG_RD,
1716 "Total number of tail loss probe expirations");
1717 rack_tlp_newdata = counter_u64_alloc(M_WAITOK);
1718 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1719 SYSCTL_CHILDREN(rack_counters),
1720 OID_AUTO, "tlp_new", CTLFLAG_RD,
1722 "Total number of tail loss probe sending new data");
1723 rack_tlp_retran = counter_u64_alloc(M_WAITOK);
1724 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1725 SYSCTL_CHILDREN(rack_counters),
1726 OID_AUTO, "tlp_retran", CTLFLAG_RD,
1728 "Total number of tail loss probe sending retransmitted data");
1729 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK);
1730 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1731 SYSCTL_CHILDREN(rack_counters),
1732 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD,
1733 &rack_tlp_retran_bytes,
1734 "Total bytes of tail loss probe sending retransmitted data");
1735 rack_to_tot = counter_u64_alloc(M_WAITOK);
1736 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1737 SYSCTL_CHILDREN(rack_counters),
1738 OID_AUTO, "rack_to_tot", CTLFLAG_RD,
1740 "Total number of times the rack to expired");
1741 rack_saw_enobuf = counter_u64_alloc(M_WAITOK);
1742 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1743 SYSCTL_CHILDREN(rack_counters),
1744 OID_AUTO, "saw_enobufs", CTLFLAG_RD,
1746 "Total number of times a sends returned enobuf for non-hdwr paced connections");
1747 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK);
1748 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1749 SYSCTL_CHILDREN(rack_counters),
1750 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD,
1751 &rack_saw_enobuf_hw,
1752 "Total number of times a send returned enobuf for hdwr paced connections");
1753 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK);
1754 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1755 SYSCTL_CHILDREN(rack_counters),
1756 OID_AUTO, "saw_enetunreach", CTLFLAG_RD,
1757 &rack_saw_enetunreach,
1758 "Total number of times a send received a enetunreachable");
1759 rack_hot_alloc = counter_u64_alloc(M_WAITOK);
1760 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1761 SYSCTL_CHILDREN(rack_counters),
1762 OID_AUTO, "alloc_hot", CTLFLAG_RD,
1764 "Total allocations from the top of our list");
1765 rack_to_alloc = counter_u64_alloc(M_WAITOK);
1766 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1767 SYSCTL_CHILDREN(rack_counters),
1768 OID_AUTO, "allocs", CTLFLAG_RD,
1770 "Total allocations of tracking structures");
1771 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK);
1772 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1773 SYSCTL_CHILDREN(rack_counters),
1774 OID_AUTO, "allochard", CTLFLAG_RD,
1775 &rack_to_alloc_hard,
1776 "Total allocations done with sleeping the hard way");
1777 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK);
1778 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1779 SYSCTL_CHILDREN(rack_counters),
1780 OID_AUTO, "allocemerg", CTLFLAG_RD,
1781 &rack_to_alloc_emerg,
1782 "Total allocations done from emergency cache");
1783 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK);
1784 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1785 SYSCTL_CHILDREN(rack_counters),
1786 OID_AUTO, "alloc_limited", CTLFLAG_RD,
1787 &rack_to_alloc_limited,
1788 "Total allocations dropped due to limit");
1789 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK);
1790 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1791 SYSCTL_CHILDREN(rack_counters),
1792 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD,
1793 &rack_alloc_limited_conns,
1794 "Connections with allocations dropped due to limit");
1795 rack_split_limited = counter_u64_alloc(M_WAITOK);
1796 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1797 SYSCTL_CHILDREN(rack_counters),
1798 OID_AUTO, "split_limited", CTLFLAG_RD,
1799 &rack_split_limited,
1800 "Split allocations dropped due to limit");
1801 rack_rxt_clamps_cwnd = counter_u64_alloc(M_WAITOK);
1802 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1803 SYSCTL_CHILDREN(rack_counters),
1804 OID_AUTO, "rxt_clamps_cwnd", CTLFLAG_RD,
1805 &rack_rxt_clamps_cwnd,
1806 "Number of times that excessive rxt clamped the cwnd down");
1807 rack_rxt_clamps_cwnd_uniq = counter_u64_alloc(M_WAITOK);
1808 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1809 SYSCTL_CHILDREN(rack_counters),
1810 OID_AUTO, "rxt_clamps_cwnd_uniq", CTLFLAG_RD,
1811 &rack_rxt_clamps_cwnd_uniq,
1812 "Number of connections that have had excessive rxt clamped the cwnd down");
1813 rack_persists_sends = counter_u64_alloc(M_WAITOK);
1814 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1815 SYSCTL_CHILDREN(rack_counters),
1816 OID_AUTO, "persist_sends", CTLFLAG_RD,
1817 &rack_persists_sends,
1818 "Number of times we sent a persist probe");
1819 rack_persists_acks = counter_u64_alloc(M_WAITOK);
1820 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1821 SYSCTL_CHILDREN(rack_counters),
1822 OID_AUTO, "persist_acks", CTLFLAG_RD,
1823 &rack_persists_acks,
1824 "Number of times a persist probe was acked");
1825 rack_persists_loss = counter_u64_alloc(M_WAITOK);
1826 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1827 SYSCTL_CHILDREN(rack_counters),
1828 OID_AUTO, "persist_loss", CTLFLAG_RD,
1829 &rack_persists_loss,
1830 "Number of times we detected a lost persist probe (no ack)");
1831 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK);
1832 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1833 SYSCTL_CHILDREN(rack_counters),
1834 OID_AUTO, "persist_loss_ends", CTLFLAG_RD,
1835 &rack_persists_lost_ends,
1836 "Number of lost persist probe (no ack) that the run ended with a PERSIST abort");
1838 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK);
1839 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1840 SYSCTL_CHILDREN(rack_counters),
1841 OID_AUTO, "map_adjust_req", CTLFLAG_RD,
1842 &rack_adjust_map_bw,
1843 "Number of times we hit the case where the sb went up and down on a sendmap entry");
1845 rack_multi_single_eq = counter_u64_alloc(M_WAITOK);
1846 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1847 SYSCTL_CHILDREN(rack_counters),
1848 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD,
1849 &rack_multi_single_eq,
1850 "Number of compressed acks total represented");
1851 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK);
1852 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1853 SYSCTL_CHILDREN(rack_counters),
1854 OID_AUTO, "cmp_ack_not", CTLFLAG_RD,
1855 &rack_proc_non_comp_ack,
1856 "Number of non compresseds acks that we processed");
1859 rack_sack_proc_all = counter_u64_alloc(M_WAITOK);
1860 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1861 SYSCTL_CHILDREN(rack_counters),
1862 OID_AUTO, "sack_long", CTLFLAG_RD,
1863 &rack_sack_proc_all,
1864 "Total times we had to walk whole list for sack processing");
1865 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK);
1866 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1867 SYSCTL_CHILDREN(rack_counters),
1868 OID_AUTO, "sack_restart", CTLFLAG_RD,
1869 &rack_sack_proc_restart,
1870 "Total times we had to walk whole list due to a restart");
1871 rack_sack_proc_short = counter_u64_alloc(M_WAITOK);
1872 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1873 SYSCTL_CHILDREN(rack_counters),
1874 OID_AUTO, "sack_short", CTLFLAG_RD,
1875 &rack_sack_proc_short,
1876 "Total times we took shortcut for sack processing");
1877 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK);
1878 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1879 SYSCTL_CHILDREN(rack_attack),
1880 OID_AUTO, "skipacked", CTLFLAG_RD,
1881 &rack_sack_skipped_acked,
1882 "Total number of times we skipped previously sacked");
1883 rack_sack_splits = counter_u64_alloc(M_WAITOK);
1884 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1885 SYSCTL_CHILDREN(rack_attack),
1886 OID_AUTO, "ofsplit", CTLFLAG_RD,
1888 "Total number of times we did the old fashion tree split");
1889 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK);
1890 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1891 SYSCTL_CHILDREN(rack_counters),
1892 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD,
1893 &rack_input_idle_reduces,
1894 "Total number of idle reductions on input");
1895 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK);
1896 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1897 SYSCTL_CHILDREN(rack_counters),
1898 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD,
1899 &rack_collapsed_win_seen,
1900 "Total number of collapsed window events seen (where our window shrinks)");
1902 rack_collapsed_win = counter_u64_alloc(M_WAITOK);
1903 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1904 SYSCTL_CHILDREN(rack_counters),
1905 OID_AUTO, "collapsed_win", CTLFLAG_RD,
1906 &rack_collapsed_win,
1907 "Total number of collapsed window events where we mark packets");
1908 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK);
1909 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1910 SYSCTL_CHILDREN(rack_counters),
1911 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD,
1912 &rack_collapsed_win_rxt,
1913 "Total number of packets that were retransmitted");
1914 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK);
1915 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1916 SYSCTL_CHILDREN(rack_counters),
1917 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD,
1918 &rack_collapsed_win_rxt_bytes,
1919 "Total number of bytes that were retransmitted");
1920 rack_try_scwnd = counter_u64_alloc(M_WAITOK);
1921 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1922 SYSCTL_CHILDREN(rack_counters),
1923 OID_AUTO, "tried_scwnd", CTLFLAG_RD,
1925 "Total number of scwnd attempts");
1926 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK);
1927 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1928 OID_AUTO, "outsize", CTLFLAG_RD,
1929 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes");
1930 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK);
1931 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1932 OID_AUTO, "opts", CTLFLAG_RD,
1933 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats");
1934 SYSCTL_ADD_PROC(&rack_sysctl_ctx,
1935 SYSCTL_CHILDREN(rack_sysctl_root),
1936 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1937 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters");
1941 rc_init_window(struct tcp_rack *rack)
1945 if (rack->rc_init_win == 0) {
1947 * Nothing set by the user, use the system stack
1950 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)));
1952 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win;
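/*
 * When a fixed pacing rate is configured, the rate actually used depends
 * on the connection's current phase: the recovery rate while in fast
 * recovery, the slow-start rate while cwnd is below ssthresh, and the
 * congestion-avoidance rate otherwise.
 */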
1957 rack_get_fixed_pacing_bw(struct tcp_rack *rack)
1959 if (IN_FASTRECOVERY(rack->rc_tp->t_flags))
1960 return (rack->r_ctl.rc_fixed_pacing_rate_rec);
1961 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
1962 return (rack->r_ctl.rc_fixed_pacing_rate_ss);
1964 return (rack->r_ctl.rc_fixed_pacing_rate_ca);
1968 rack_log_hybrid_bw(struct tcp_rack *rack, uint32_t seq, uint64_t cbw, uint64_t tim,
1969 uint64_t data, uint8_t mod, uint16_t aux,
1970 struct http_sendfile_track *cur)
1972 #ifdef TCP_REQUEST_TRK
1976 * The rate cap logs are noisy and should only come out when normal BB logging
1977 * is enabled; the other logs (not RATE_CAP and not CAP_CALC) come out only
1978 * once per chunk and make up the BB point that can be turned on by the client.
1980 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) {
1982 * The very noisy two need to only come out when
1983 * we have verbose logging on.
1985 if (rack_verbose_logging != 0)
1986 do_log = tcp_bblogging_on(rack->rc_tp);
1989 } else if (mod != HYBRID_LOG_BW_MEASURE) {
1991 * All other, less noisy, logs come out here, except the measure log which
1992 * also needs to come out on both the BB point and the regular log.
1994 do_log = tcp_bblogging_on(rack->rc_tp);
1996 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING);
2000 union tcp_log_stackspecific log;
2004 /* Convert our ms to a microsecond */
2005 memset(&log, 0, sizeof(log));
2007 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2008 log.u_bbr.rttProp = tim;
2009 log.u_bbr.bw_inuse = cbw;
2010 log.u_bbr.delRate = rack_get_gp_est(rack);
2011 lt_bw = rack_get_lt_bw(rack);
2012 log.u_bbr.flex1 = seq;
2013 log.u_bbr.pacing_gain = aux;
2014 /* lt_bw = < flex3 | flex2 > */
2015 log.u_bbr.flex2 = (uint32_t)(lt_bw & 0x00000000ffffffff);
2016 log.u_bbr.flex3 = (uint32_t)((lt_bw >> 32) & 0x00000000ffffffff);
2017 /* Record the last obtained us rtt in inflight */
2019 /* Make sure we are looking at the right log if an override comes in */
2020 cur = rack->r_ctl.rc_last_sft;
2022 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY)
2023 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt;
2025 /* Use the last known rtt i.e. the rack-rtt */
2026 log.u_bbr.inflight = rack->rc_rack_rtt;
2031 log.u_bbr.cur_del_rate = cur->deadline;
2032 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) {
2033 /* start = < lost | pkt_epoch > */
2034 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff);
2035 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff);
2036 log.u_bbr.flex6 = cur->start_seq;
2037 log.u_bbr.pkts_out = cur->end_seq;
2039 /* start = < lost | pkt_epoch > */
2040 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff);
2041 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff);
2042 /* end = < pkts_out | flex6 > */
2043 log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff);
2044 log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff);
2046 /* first_send = <lt_epoch | epoch> */
2047 log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff);
2048 log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff);
2049 /* localtime = <delivered | applimited>*/
2050 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff);
2051 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff);
2052 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_http_info[0]);
2053 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct http_sendfile_track));
2054 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs);
2055 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs);
2056 log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags;
2058 log.u_bbr.flex7 = 0xffff;
2059 log.u_bbr.cur_del_rate = 0xffffffffffffffff;
2062 * Compose bbr_state to be a bitwise 0000ADHF
2063 * where A is the always_pace flag
2064 * where D is the dgp_on flag
2065 * where H is the hybrid_mode on flag
2066 * where F is the use_fixed_rate flag.
2068 log.u_bbr.bbr_state = rack->rc_always_pace;
2069 log.u_bbr.bbr_state <<= 1;
2070 log.u_bbr.bbr_state |= rack->dgp_on;
2071 log.u_bbr.bbr_state <<= 1;
2072 log.u_bbr.bbr_state |= rack->rc_hybrid_mode;
2073 log.u_bbr.bbr_state <<= 1;
2074 log.u_bbr.bbr_state |= rack->use_fixed_rate;
2075 log.u_bbr.flex8 = mod;
2076 tcp_log_event(rack->rc_tp, NULL,
2077 &rack->rc_inp->inp_socket->so_rcv,
2078 &rack->rc_inp->inp_socket->so_snd,
2079 TCP_HYBRID_PACING_LOG, 0,
2080 0, &log, false, NULL, __func__, __LINE__, &tv);
2086 static inline uint64_t
2087 rack_compensate_for_linerate(struct tcp_rack *rack, uint64_t bw)
2089 uint64_t ret_bw, ether;
2092 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr);
2095 ether += sizeof(struct ip6_hdr);
2097 ether += 14; /* eheader size 6+6+2 */
2100 ether += sizeof(struct ip);
2102 ether += 14; /* eheader size 6+6+2 */
2104 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs);
2112 rack_rate_cap_bw(struct tcp_rack *rack, uint64_t *bw, int *capped)
2114 #ifdef TCP_REQUEST_TRK
2116 uint64_t timenow, timeleft, lenleft, lengone, calcbw;
2119 if (rack->r_ctl.bw_rate_cap == 0)
2121 #ifdef TCP_REQUEST_TRK
2122 if (rack->rc_catch_up && rack->rc_hybrid_mode &&
2123 (rack->r_ctl.rc_last_sft != NULL)) {
2125 * We have a dynamic cap. The original target
2126 * is in bw_rate_cap, but we need to look at
2127 * how long it is until we hit the deadline.
2129 struct http_sendfile_track *ent;
2131 ent = rack->r_ctl.rc_last_sft;
2133 timenow = tcp_tv_to_lusectick(&tv);
2134 if (timenow >= ent->deadline) {
2135 /* No time left we do DGP only */
2136 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
2137 0, 0, 0, HYBRID_LOG_OUTOFTIME, 0, ent);
2138 rack->r_ctl.bw_rate_cap = 0;
2141 /* We have the time */
2142 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow;
2143 if (timeleft < HPTS_MSEC_IN_SEC) {
2144 /* If there is less than a ms left just use DGP's rate */
2145 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
2146 0, timeleft, 0, HYBRID_LOG_OUTOFTIME, 0, ent);
2147 rack->r_ctl.bw_rate_cap = 0;
2151 * Now lets find the amount of data left to send.
2153 * Now ideally we want to use the end_seq to figure out how much more
2154 * but that is only possible if we have the TRACK_FG_COMP flag on the entry.
2156 if (ent->flags & TCP_HTTP_TRACK_FLG_COMP) {
2157 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una))
2158 lenleft = ent->end_seq - rack->rc_tp->snd_una;
2160 /* TSNH, we should catch it at the send */
2161 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
2162 0, timeleft, 0, HYBRID_LOG_CAPERROR, 0, ent);
2163 rack->r_ctl.bw_rate_cap = 0;
2168 * The hard way, figure out how much is gone and then
2169 * take that away from the total the client asked for
2170 * (that's off by TLS overhead if this is TLS).
2172 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq))
2173 lengone = rack->rc_tp->snd_una - ent->start_seq;
2176 if (lengone < (ent->end - ent->start))
2177 lenleft = (ent->end - ent->start) - lengone;
2179 /* TSNH, we should catch it at the send */
2180 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
2181 0, timeleft, lengone, HYBRID_LOG_CAPERROR, 0, ent);
2182 rack->r_ctl.bw_rate_cap = 0;
2187 /* We have it all sent */
2188 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
2189 0, timeleft, lenleft, HYBRID_LOG_ALLSENT, 0, ent);
2190 if (rack->r_ctl.bw_rate_cap)
2191 goto normal_ratecap;
2195 calcbw = lenleft * HPTS_USEC_IN_SEC;
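/*
 * The catch-up rate is simply the bytes still owed on the request spread
 * over the time remaining until the deadline, scaled by HPTS_USEC_IN_SEC
 * so the result is in bytes per second.
 */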
2197 /* Now we must compensate for IP/TCP overhead */
2198 calcbw = rack_compensate_for_linerate(rack, calcbw);
2199 /* Update the bit rate cap */
2200 rack->r_ctl.bw_rate_cap = calcbw;
2201 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) &&
2202 (rack_hybrid_allow_set_maxseg == 1) &&
2203 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) {
2204 /* Let's possibly set a smaller mss here to match our rate-cap */
2207 orig_max = rack->r_ctl.rc_pace_max_segs;
2208 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS;
2209 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp));
2210 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5);
2212 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
2213 calcbw, timeleft, lenleft, HYBRID_LOG_CAP_CALC, 0, ent);
2214 if ((calcbw > 0) && (*bw > calcbw)) {
2215 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
2216 *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent);
2224 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) {
2225 #ifdef TCP_REQUEST_TRK
2226 if (rack->rc_hybrid_mode &&
2227 rack->rc_catch_up &&
2228 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) &&
2229 (rack_hybrid_allow_set_maxseg == 1) &&
2230 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) {
2231 /* Let's possibly set a smaller mss here to match our rate-cap */
2234 orig_max = rack->r_ctl.rc_pace_max_segs;
2235 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS;
2236 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg(rack->rc_tp));
2237 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5);
2241 *bw = rack->r_ctl.bw_rate_cap;
2242 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
2244 HYBRID_LOG_RATE_CAP, 1, NULL);
2249 rack_get_gp_est(struct tcp_rack *rack)
2251 uint64_t bw, lt_bw, ret_bw;
2253 if (rack->rc_gp_filled == 0) {
2255 * We do not yet have a b/w measurement;
2256 * if we have a user set initial bw
2257 * return it. If we don't have that and
2258 * we have an srtt, use the tcp IW (10) to
2259 * calculate a fictional b/w over the SRTT
2260 * which is more or less a guess. Note
2261 * we don't use our IW from rack on purpose
2262 * so if we have like IW=30, we are not
2263 * calculating a "huge" b/w.
2267 lt_bw = rack_get_lt_bw(rack);
2270 * No goodput bw but a long-term b/w does exist
2276 if (rack->r_ctl.init_rate)
2277 return (rack->r_ctl.init_rate);
2279 /* Ok, let's come up with the IW guess, if we have an srtt */
2280 if (rack->rc_tp->t_srtt == 0) {
2282 * Go with old pacing method
2283 * i.e. burst mitigation only.
2287 /* Ok, let's get the initial TCP win (not rack's) */
2288 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp));
2289 srtt = (uint64_t)rack->rc_tp->t_srtt;
2290 bw *= (uint64_t)USECS_IN_SECOND;
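/*
 * The fictional rate is the initial window spread over one SRTT, i.e.
 * roughly bw = IW_bytes * USECS_IN_SECOND / srtt_in_usecs. Purely as an
 * illustration, a 14600-byte IW with a 100 ms SRTT works out to about
 * 146 KB/s.
 */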
2296 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
2297 /* Averaging is done, we can return the value */
2298 bw = rack->r_ctl.gp_bw;
2300 /* Still doing the initial averaging, must calculate */
2301 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1);
2303 lt_bw = rack_get_lt_bw(rack);
2305 /* If we don't have one then equate it to the gp_bw */
2306 lt_bw = rack->r_ctl.gp_bw;
2308 if ((rack->r_cwnd_was_clamped == 1) && (rack->r_clamped_gets_lower > 0)){
2309 /* if clamped take the lowest */
2315 /* If not set for clamped to get lowest, take the highest */
2322 * Now lets compensate based on the TCP/IP overhead. Our
2323 * Goodput estimate does not include this so we must pace out
2324 * a bit faster since our pacing calculations do. The pacing
2325 * calculations use the base ETHERNET_SEGMENT_SIZE and the segsiz
2326 * we are using to do this, so we do that here in the opposite
2327 * direction as well. This means that if we are tunneled and the
2328 * segsiz is, say, 1200 bytes we will get quite a boost, but it's
2329 * compensated for in the pacing time the opposite way.
2332 ret_bw = rack_compensate_for_linerate(rack, ret_bw);
2338 rack_get_bw(struct tcp_rack *rack)
2342 if (rack->use_fixed_rate) {
2343 /* Return the fixed pacing rate */
2344 return (rack_get_fixed_pacing_bw(rack));
2346 bw = rack_get_gp_est(rack);
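/*
 * The output gain is a percentage applied on top of the bandwidth
 * estimate: the probe-RTT gain while probing, the recovery gain for
 * retransmissions (and, when PRR is in effect, for other sends in
 * recovery), and otherwise the slow-start or congestion-avoidance gain
 * depending on cwnd versus ssthresh.
 */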
2351 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm)
2353 if (rack->use_fixed_rate) {
2355 } else if (rack->in_probe_rtt && (rsm == NULL))
2356 return (rack->r_ctl.rack_per_of_gp_probertt);
2357 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) &&
2358 rack->r_ctl.rack_per_of_gp_rec)) {
2360 /* a retransmission always uses the recovery rate */
2361 return (rack->r_ctl.rack_per_of_gp_rec);
2362 } else if (rack->rack_rec_nonrxt_use_cr) {
2363 /* Directed to use the configured rate */
2364 goto configured_rate;
2365 } else if (rack->rack_no_prr &&
2366 (rack->r_ctl.rack_per_of_gp_rec > 100)) {
2367 /* No PRR, let's just use the b/w estimate only */
2371 * Here we may have a non-retransmit but we
2372 * have no overrides, so just use the recovery
2373 * rate (prr is in effect).
2375 return (rack->r_ctl.rack_per_of_gp_rec);
2379 /* For the configured rate we look at our cwnd vs the ssthresh */
2380 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
2381 return (rack->r_ctl.rack_per_of_gp_ss);
2383 return (rack->r_ctl.rack_per_of_gp_ca);
2387 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6)
2390 * Types of logs (mod value)
2391 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit.
2392 * 2 = a dsack round begins, persist is reset to 16.
2393 * 3 = a dsack round ends
2394 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh
2395 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack
2396 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh.
2398 if (tcp_bblogging_on(rack->rc_tp)) {
2399 union tcp_log_stackspecific log;
2402 memset(&log, 0, sizeof(log));
2403 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based;
2404 log.u_bbr.flex1 <<= 1;
2405 log.u_bbr.flex1 |= rack->rc_rack_use_dsack;
2406 log.u_bbr.flex1 <<= 1;
2407 log.u_bbr.flex1 |= rack->rc_dsack_round_seen;
2408 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end;
2409 log.u_bbr.flex3 = rack->r_ctl.num_dsack;
2410 log.u_bbr.flex4 = flex4;
2411 log.u_bbr.flex5 = flex5;
2412 log.u_bbr.flex6 = flex6;
2413 log.u_bbr.flex7 = rack->r_ctl.dsack_persist;
2414 log.u_bbr.flex8 = mod;
2415 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2416 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2417 &rack->rc_inp->inp_socket->so_rcv,
2418 &rack->rc_inp->inp_socket->so_snd,
2419 RACK_DSACK_HANDLING, 0,
2420 0, &log, false, &tv);
2425 rack_log_hdwr_pacing(struct tcp_rack *rack,
2426 uint64_t rate, uint64_t hw_rate, int line,
2427 int error, uint16_t mod)
2429 if (tcp_bblogging_on(rack->rc_tp)) {
2430 union tcp_log_stackspecific log;
2432 const struct ifnet *ifp;
2434 memset(&log, 0, sizeof(log));
2435 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff);
2436 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff);
2437 if (rack->r_ctl.crte) {
2438 ifp = rack->r_ctl.crte->ptbl->rs_ifp;
2439 } else if (rack->rc_inp->inp_route.ro_nh &&
2440 rack->rc_inp->inp_route.ro_nh->nh_ifp) {
2441 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp;
2445 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff);
2446 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff);
2448 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2449 log.u_bbr.bw_inuse = rate;
2450 log.u_bbr.flex5 = line;
2451 log.u_bbr.flex6 = error;
2452 log.u_bbr.flex7 = mod;
2453 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs;
2454 log.u_bbr.flex8 = rack->use_fixed_rate;
2455 log.u_bbr.flex8 <<= 1;
2456 log.u_bbr.flex8 |= rack->rack_hdrw_pacing;
2457 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
2458 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate;
2459 if (rack->r_ctl.crte)
2460 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate;
2462 log.u_bbr.cur_del_rate = 0;
2463 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req;
2464 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2465 &rack->rc_inp->inp_socket->so_rcv,
2466 &rack->rc_inp->inp_socket->so_snd,
2467 BBR_LOG_HDWR_PACE, 0,
2468 0, &log, false, &tv);
2473 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped)
2476 * We allow rack_per_of_gp_xx to dictate our bw rate we want.
2478 uint64_t bw_est, high_rate;
2481 if ((rack->r_pacing_discount == 0) ||
2482 (rack_full_buffer_discount == 0)) {
2484 * No buffer level based discount from client buffer
2485 * level is enabled or the feature is disabled.
2487 gain = (uint64_t)rack_get_output_gain(rack, rsm);
2489 bw_est /= (uint64_t)100;
2492 * We have a discount in place apply it with
2493 * just a 100% gain (we get no boost if the buffer
2498 discount = bw * (uint64_t)(rack_full_buffer_discount * rack->r_ctl.pacing_discount_amm);
2500 /* What %% of the b/w do we discount */
2501 bw_est = bw - discount;
2503 /* Never fall below the minimum (def 64kbps) */
2504 if (bw_est < RACK_MIN_BW)
2505 bw_est = RACK_MIN_BW;
2506 if (rack->r_rack_hw_rate_caps) {
2507 /* Rate caps are in place */
2508 if (rack->r_ctl.crte != NULL) {
2509 /* We have a hdwr rate already */
2510 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte);
2511 if (bw_est >= high_rate) {
2512 /* We are capping bw at the highest rate table entry */
2513 if (rack_hw_rate_cap_per &&
2514 (((high_rate * (100 + rack_hw_rate_cap_per)) / 100) < bw_est)) {
2515 rack->r_rack_hw_rate_caps = 0;
2518 rack_log_hdwr_pacing(rack,
2519 bw_est, high_rate, __LINE__,
2525 } else if ((rack->rack_hdrw_pacing == 0) &&
2526 (rack->rack_hdw_pace_ena) &&
2527 (rack->rack_attempt_hdwr_pace == 0) &&
2528 (rack->rc_inp->inp_route.ro_nh != NULL) &&
2529 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
2531 * Special case, we have not yet attempted hardware
2532 * pacing, and yet we may, when we do, find out if we are
2533 * above the highest rate. We need to know the maxbw for the interface
2534 * in question (if it supports ratelimiting). We get back
2535 * a 0, if the interface is not found in the RL lists.
2537 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp);
2539 /* Yep, we have a rate is it above this rate? */
2540 if (bw_est > high_rate) {
2553 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod)
2555 if (tcp_bblogging_on(rack->rc_tp)) {
2556 union tcp_log_stackspecific log;
2559 if (rack->sack_attack_disable > 0)
2561 if ((mod != 1) && (rack_verbose_logging == 0)) {
2563 * We get 3 values currently for mod
2564 * 1 - We are retransmitting and this tells the reason.
2565 * 2 - We are clearing a dup-ack count.
2566 * 3 - We are incrementing a dup-ack count.
2568 * The clear/increment are only logged
2569 * if you have BBverbose on.
2574 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2575 log.u_bbr.flex1 = tsused;
2576 log.u_bbr.flex2 = thresh;
2577 log.u_bbr.flex3 = rsm->r_flags;
2578 log.u_bbr.flex4 = rsm->r_dupack;
2579 log.u_bbr.flex5 = rsm->r_start;
2580 log.u_bbr.flex6 = rsm->r_end;
2581 log.u_bbr.flex8 = mod;
2582 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2583 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2584 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2585 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2586 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2587 log.u_bbr.pacing_gain = rack->r_must_retran;
2588 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2589 &rack->rc_inp->inp_socket->so_rcv,
2590 &rack->rc_inp->inp_socket->so_snd,
2591 BBR_LOG_SETTINGS_CHG, 0,
2592 0, &log, false, &tv);
2597 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
2599 if (tcp_bblogging_on(rack->rc_tp)) {
2600 union tcp_log_stackspecific log;
2603 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2604 log.u_bbr.flex1 = rack->rc_tp->t_srtt;
2605 log.u_bbr.flex2 = to;
2606 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
2607 log.u_bbr.flex4 = slot;
2608 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot;
2609 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2610 log.u_bbr.flex7 = rack->rc_in_persist;
2611 log.u_bbr.flex8 = which;
2612 if (rack->rack_no_prr)
2613 log.u_bbr.pkts_out = 0;
2615 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
2616 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2617 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2618 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2619 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2620 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2621 log.u_bbr.pacing_gain = rack->r_must_retran;
2622 log.u_bbr.cwnd_gain = rack->rack_deferred_inited;
2623 log.u_bbr.pkt_epoch = rack->rc_has_collapsed;
2624 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift;
2625 log.u_bbr.lost = rack_rto_min;
2626 log.u_bbr.epoch = rack->r_ctl.roundends;
2627 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2628 &rack->rc_inp->inp_socket->so_rcv,
2629 &rack->rc_inp->inp_socket->so_snd,
2630 BBR_LOG_TIMERSTAR, 0,
2631 0, &log, false, &tv);
2636 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm)
2638 if (tcp_bblogging_on(rack->rc_tp)) {
2639 union tcp_log_stackspecific log;
2642 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2643 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2644 log.u_bbr.flex8 = to_num;
2645 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt;
2646 log.u_bbr.flex2 = rack->rc_rack_rtt;
2648 log.u_bbr.flex3 = 0;
2650 log.u_bbr.flex3 = rsm->r_end - rsm->r_start;
2651 if (rack->rack_no_prr)
2652 log.u_bbr.flex5 = 0;
2654 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2655 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2656 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2657 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2658 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2659 log.u_bbr.pacing_gain = rack->r_must_retran;
2660 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2661 &rack->rc_inp->inp_socket->so_rcv,
2662 &rack->rc_inp->inp_socket->so_snd,
2664 0, &log, false, &tv);
2669 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack,
2670 struct rack_sendmap *prev,
2671 struct rack_sendmap *rsm,
2672 struct rack_sendmap *next,
2673 int flag, uint32_t th_ack, int line)
2675 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
2676 union tcp_log_stackspecific log;
2679 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2680 log.u_bbr.flex8 = flag;
2681 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2682 log.u_bbr.cur_del_rate = (uint64_t)prev;
2683 log.u_bbr.delRate = (uint64_t)rsm;
2684 log.u_bbr.rttProp = (uint64_t)next;
2685 log.u_bbr.flex7 = 0;
2687 log.u_bbr.flex1 = prev->r_start;
2688 log.u_bbr.flex2 = prev->r_end;
2689 log.u_bbr.flex7 |= 0x4;
2692 log.u_bbr.flex3 = rsm->r_start;
2693 log.u_bbr.flex4 = rsm->r_end;
2694 log.u_bbr.flex7 |= 0x2;
2697 log.u_bbr.flex5 = next->r_start;
2698 log.u_bbr.flex6 = next->r_end;
2699 log.u_bbr.flex7 |= 0x1;
2701 log.u_bbr.applimited = line;
2702 log.u_bbr.pkts_out = th_ack;
2703 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2704 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2705 if (rack->rack_no_prr)
2708 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt;
2709 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2710 &rack->rc_inp->inp_socket->so_rcv,
2711 &rack->rc_inp->inp_socket->so_snd,
2713 0, &log, false, &tv);
2718 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len,
2719 struct rack_sendmap *rsm, int conf)
2721 if (tcp_bblogging_on(tp)) {
2722 union tcp_log_stackspecific log;
2724 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2725 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2726 log.u_bbr.flex1 = t;
2727 log.u_bbr.flex2 = len;
2728 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt;
2729 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest;
2730 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest;
2731 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt;
2732 log.u_bbr.flex7 = conf;
2733 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot;
2734 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method;
2735 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2736 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt;
2737 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags;
2738 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2740 log.u_bbr.pkt_epoch = rsm->r_start;
2741 log.u_bbr.lost = rsm->r_end;
2742 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt;
2743 /* We lose any of the bits above the lower 24 */
2744 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags;
2747 log.u_bbr.pkt_epoch = rack->rc_tp->iss;
2749 log.u_bbr.cwnd_gain = 0;
2750 log.u_bbr.pacing_gain = 0;
2752 /* Write out general bits of interest rrs here */
2753 log.u_bbr.use_lt_bw = rack->rc_highly_buffered;
2754 log.u_bbr.use_lt_bw <<= 1;
2755 log.u_bbr.use_lt_bw |= rack->forced_ack;
2756 log.u_bbr.use_lt_bw <<= 1;
2757 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul;
2758 log.u_bbr.use_lt_bw <<= 1;
2759 log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
2760 log.u_bbr.use_lt_bw <<= 1;
2761 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
2762 log.u_bbr.use_lt_bw <<= 1;
2763 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
2764 log.u_bbr.use_lt_bw <<= 1;
2765 log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
2766 log.u_bbr.use_lt_bw <<= 1;
2767 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom;
2768 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight;
2769 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts;
2770 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered;
2771 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts;
2772 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt;
2773 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
2774 log.u_bbr.bw_inuse <<= 32;
2776 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]);
2777 TCP_LOG_EVENTP(tp, NULL,
2778 &rack->rc_inp->inp_socket->so_rcv,
2779 &rack->rc_inp->inp_socket->so_snd,
2781 0, &log, false, &tv);
2788 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt)
2791 * Log the rtt sample we are
2792 * applying to the srtt algorithm in
2795 if (tcp_bblogging_on(rack->rc_tp)) {
2796 union tcp_log_stackspecific log;
2799 /* Convert our ms to a microsecond */
2800 memset(&log, 0, sizeof(log));
2801 log.u_bbr.flex1 = rtt;
2802 log.u_bbr.flex2 = rack->r_ctl.ack_count;
2803 log.u_bbr.flex3 = rack->r_ctl.sack_count;
2804 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
2805 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra;
2806 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2807 log.u_bbr.flex7 = 1;
2808 log.u_bbr.flex8 = rack->sack_attack_disable;
2809 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2810 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2811 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2812 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2813 log.u_bbr.pacing_gain = rack->r_must_retran;
2815 * We capture in delRate the upper 32 bits as
2816 * the confidence level we had declared, and the
2817 * lower 32 bits as the actual RTT using the arrival
2820 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence;
2821 log.u_bbr.delRate <<= 32;
2822 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt;
2823 /* Let's capture all the things that make up t_rxtcur */
2824 log.u_bbr.applimited = rack_rto_min;
2825 log.u_bbr.epoch = rack_rto_max;
2826 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop;
2827 log.u_bbr.lost = rack_rto_min;
2828 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop);
2829 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp);
2830 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec;
2831 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC;
2832 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec;
2833 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2834 &rack->rc_inp->inp_socket->so_rcv,
2835 &rack->rc_inp->inp_socket->so_snd,
2837 0, &log, false, &tv);
2842 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where)
2844 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
2845 union tcp_log_stackspecific log;
2848 /* Convert our ms to a microsecond */
2849 memset(&log, 0, sizeof(log));
2850 log.u_bbr.flex1 = rtt;
2851 log.u_bbr.flex2 = send_time;
2852 log.u_bbr.flex3 = ack_time;
2853 log.u_bbr.flex4 = where;
2854 log.u_bbr.flex7 = 2;
2855 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2856 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2857 &rack->rc_inp->inp_socket->so_rcv,
2858 &rack->rc_inp->inp_socket->so_snd,
2860 0, &log, false, &tv);
2866 rack_log_rtt_sendmap(struct tcp_rack *rack, uint32_t idx, uint64_t tsv, uint32_t tsecho)
2868 if (tcp_bblogging_on(rack->rc_tp)) {
2869 union tcp_log_stackspecific log;
2872 /* Convert our ms to a microsecond */
2873 memset(&log, 0, sizeof(log));
2874 log.u_bbr.flex1 = idx;
2875 log.u_bbr.flex2 = rack_ts_to_msec(tsv);
2876 log.u_bbr.flex3 = tsecho;
2877 log.u_bbr.flex7 = 3;
2878 log.u_bbr.rttProp = tsv;
2879 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2880 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2881 &rack->rc_inp->inp_socket->so_rcv,
2882 &rack->rc_inp->inp_socket->so_snd,
2884 0, &log, false, &tv);
2890 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line)
2892 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
2893 union tcp_log_stackspecific log;
2896 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2897 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2898 log.u_bbr.flex1 = line;
2899 log.u_bbr.flex2 = tick;
2900 log.u_bbr.flex3 = tp->t_maxunacktime;
2901 log.u_bbr.flex4 = tp->t_acktime;
2902 log.u_bbr.flex8 = event;
2903 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2904 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2905 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2906 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2907 log.u_bbr.pacing_gain = rack->r_must_retran;
2908 TCP_LOG_EVENTP(tp, NULL,
2909 &rack->rc_inp->inp_socket->so_rcv,
2910 &rack->rc_inp->inp_socket->so_snd,
2911 BBR_LOG_PROGRESS, 0,
2912 0, &log, false, &tv);
2917 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv, int line)
2919 if (tcp_bblogging_on(rack->rc_tp)) {
2920 union tcp_log_stackspecific log;
2922 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2923 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2924 log.u_bbr.flex1 = slot;
2925 if (rack->rack_no_prr)
2926 log.u_bbr.flex2 = 0;
2928 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt;
2929 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
2930 log.u_bbr.flex5 = rack->r_ctl.ack_during_sd;
2931 log.u_bbr.flex6 = line;
2932 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags);
2933 log.u_bbr.flex8 = rack->rc_in_persist;
2934 log.u_bbr.timeStamp = cts;
2935 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2936 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2937 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2938 log.u_bbr.pacing_gain = rack->r_must_retran;
2939 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2940 &rack->rc_inp->inp_socket->so_rcv,
2941 &rack->rc_inp->inp_socket->so_snd,
2943 0, &log, false, tv);
2948 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs)
2950 if (tcp_bblogging_on(rack->rc_tp)) {
2951 union tcp_log_stackspecific log;
2954 memset(&log, 0, sizeof(log));
2955 log.u_bbr.flex1 = did_out;
2956 log.u_bbr.flex2 = nxt_pkt;
2957 log.u_bbr.flex3 = way_out;
2958 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
2959 if (rack->rack_no_prr)
2960 log.u_bbr.flex5 = 0;
2962 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2963 log.u_bbr.flex6 = nsegs;
2964 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs;
2965 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */
2966 log.u_bbr.flex7 <<= 1;
2967 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */
2968 log.u_bbr.flex7 <<= 1;
2969 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */
2970 log.u_bbr.flex8 = rack->rc_in_persist;
2971 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
2972 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2973 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2974 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
2975 log.u_bbr.use_lt_bw <<= 1;
2976 log.u_bbr.use_lt_bw |= rack->r_might_revert;
2977 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2978 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2979 log.u_bbr.pacing_gain = rack->r_must_retran;
2980 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2981 &rack->rc_inp->inp_socket->so_rcv,
2982 &rack->rc_inp->inp_socket->so_snd,
2983 BBR_LOG_DOSEG_DONE, 0,
2984 0, &log, false, &tv);
2989 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm)
2991 if (tcp_bblogging_on(rack->rc_tp)) {
2992 union tcp_log_stackspecific log;
2995 memset(&log, 0, sizeof(log));
2996 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs;
2997 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
2998 log.u_bbr.flex4 = arg1;
2999 log.u_bbr.flex5 = arg2;
3000 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs;
3001 log.u_bbr.flex6 = arg3;
3002 log.u_bbr.flex8 = frm;
3003 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3004 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3005 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
3006 log.u_bbr.applimited = rack->r_ctl.rc_sacked;
3007 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
3008 log.u_bbr.pacing_gain = rack->r_must_retran;
3009 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv,
3010 &tptosocket(tp)->so_snd,
3011 TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv);
3016 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot,
3017 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use)
3019 if (tcp_bblogging_on(rack->rc_tp)) {
3020 union tcp_log_stackspecific log;
3023 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3024 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
3025 log.u_bbr.flex1 = slot;
3026 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
3027 log.u_bbr.flex4 = reason;
3028 if (rack->rack_no_prr)
3029 log.u_bbr.flex5 = 0;
3031 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
3032 log.u_bbr.flex7 = hpts_calling;
3033 log.u_bbr.flex8 = rack->rc_in_persist;
3034 log.u_bbr.lt_epoch = cwnd_to_use;
3035 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3036 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3037 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
3038 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
3039 log.u_bbr.pacing_gain = rack->r_must_retran;
3040 log.u_bbr.cwnd_gain = rack->rc_has_collapsed;
3041 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3042 &rack->rc_inp->inp_socket->so_rcv,
3043 &rack->rc_inp->inp_socket->so_snd,
3045 tlen, &log, false, &tv);
3050 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts,
3051 struct timeval *tv, uint32_t flags_on_entry)
3053 if (tcp_bblogging_on(rack->rc_tp)) {
3054 union tcp_log_stackspecific log;
3056 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3057 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
3058 log.u_bbr.flex1 = line;
3059 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to;
3060 log.u_bbr.flex3 = flags_on_entry;
3061 log.u_bbr.flex4 = us_cts;
3062 if (rack->rack_no_prr)
3063 log.u_bbr.flex5 = 0;
3065 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
3066 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
3067 log.u_bbr.flex7 = hpts_removed;
3068 log.u_bbr.flex8 = 1;
3069 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags;
3070 log.u_bbr.timeStamp = us_cts;
3071 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3072 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
3073 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
3074 log.u_bbr.pacing_gain = rack->r_must_retran;
3075 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3076 &rack->rc_inp->inp_socket->so_rcv,
3077 &rack->rc_inp->inp_socket->so_snd,
3078 BBR_LOG_TIMERCANC, 0,
3079 0, &log, false, tv);
3084 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
3085 uint32_t flex1, uint32_t flex2,
3086 uint32_t flex3, uint32_t flex4,
3087 uint32_t flex5, uint32_t flex6,
3088 uint16_t flex7, uint8_t mod)
3090 if (tcp_bblogging_on(rack->rc_tp)) {
3091 union tcp_log_stackspecific log;
3095 /* No, you can't use 1; that is reserved for the real timer cancel */
3098 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3099 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3100 log.u_bbr.flex1 = flex1;
3101 log.u_bbr.flex2 = flex2;
3102 log.u_bbr.flex3 = flex3;
3103 log.u_bbr.flex4 = flex4;
3104 log.u_bbr.flex5 = flex5;
3105 log.u_bbr.flex6 = flex6;
3106 log.u_bbr.flex7 = flex7;
3107 log.u_bbr.flex8 = mod;
3108 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3109 &rack->rc_inp->inp_socket->so_rcv,
3110 &rack->rc_inp->inp_socket->so_snd,
3111 BBR_LOG_TIMERCANC, 0,
3112 0, &log, false, &tv);
3117 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers)
3119 if (tcp_bblogging_on(rack->rc_tp)) {
3120 union tcp_log_stackspecific log;
3123 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3124 log.u_bbr.flex1 = timers;
3125 log.u_bbr.flex2 = ret;
3126 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp;
3127 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
3128 log.u_bbr.flex5 = cts;
3129 if (rack->rack_no_prr)
3130 log.u_bbr.flex6 = 0;
3132 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt;
3133 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
3134 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
3135 log.u_bbr.pacing_gain = rack->r_must_retran;
3136 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3137 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3138 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3139 &rack->rc_inp->inp_socket->so_rcv,
3140 &rack->rc_inp->inp_socket->so_snd,
3141 BBR_LOG_TO_PROCESS, 0,
3142 0, &log, false, &tv);
3147 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line)
3149 if (tcp_bblogging_on(rack->rc_tp)) {
3150 union tcp_log_stackspecific log;
3153 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3154 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out;
3155 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs;
3156 if (rack->rack_no_prr)
3157 log.u_bbr.flex3 = 0;
3159 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt;
3160 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered;
3161 log.u_bbr.flex5 = rack->r_ctl.rc_sacked;
3162 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt;
3163 log.u_bbr.flex7 = line;
3164 log.u_bbr.flex8 = frm;
3165 log.u_bbr.pkts_out = orig_cwnd;
3166 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3167 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3168 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
3169 log.u_bbr.use_lt_bw <<= 1;
3170 log.u_bbr.use_lt_bw |= rack->r_might_revert;
3171 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3172 &rack->rc_inp->inp_socket->so_rcv,
3173 &rack->rc_inp->inp_socket->so_snd,
3175 0, &log, false, &tv);
3179 #ifdef TCP_SAD_DETECTION
3181 rack_log_sad(struct tcp_rack *rack, int event)
3183 if (tcp_bblogging_on(rack->rc_tp)) {
3184 union tcp_log_stackspecific log;
3187 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3188 log.u_bbr.flex1 = rack->r_ctl.sack_count;
3189 log.u_bbr.flex2 = rack->r_ctl.ack_count;
3190 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra;
3191 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
3192 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced;
3193 log.u_bbr.flex6 = tcp_sack_to_ack_thresh;
3194 log.u_bbr.pkts_out = tcp_sack_to_move_thresh;
3195 log.u_bbr.lt_epoch = (tcp_force_detection << 8);
3196 log.u_bbr.lt_epoch |= rack->do_detection;
3197 log.u_bbr.applimited = tcp_map_minimum;
3198 log.u_bbr.flex7 = rack->sack_attack_disable;
3199 log.u_bbr.flex8 = event;
3200 log.u_bbr.bbr_state = rack->rc_suspicious;
3201 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3202 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3203 log.u_bbr.delivered = tcp_sad_decay_val;
3204 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3205 &rack->rc_inp->inp_socket->so_rcv,
3206 &rack->rc_inp->inp_socket->so_snd,
3208 0, &log, false, &tv);
3214 rack_counter_destroy(void)
3216 counter_u64_free(rack_total_bytes);
3217 counter_u64_free(rack_fto_send);
3218 counter_u64_free(rack_fto_rsm_send);
3219 counter_u64_free(rack_nfto_resend);
3220 counter_u64_free(rack_hw_pace_init_fail);
3221 counter_u64_free(rack_hw_pace_lost);
3222 counter_u64_free(rack_non_fto_send);
3223 counter_u64_free(rack_extended_rfo);
3224 counter_u64_free(rack_ack_total);
3225 counter_u64_free(rack_express_sack);
3226 counter_u64_free(rack_sack_total);
3227 counter_u64_free(rack_move_none);
3228 counter_u64_free(rack_move_some);
3229 counter_u64_free(rack_sack_attacks_detected);
3230 counter_u64_free(rack_sack_attacks_reversed);
3231 counter_u64_free(rack_sack_attacks_suspect);
3232 counter_u64_free(rack_sack_used_next_merge);
3233 counter_u64_free(rack_sack_used_prev_merge);
3234 counter_u64_free(rack_tlp_tot);
3235 counter_u64_free(rack_tlp_newdata);
3236 counter_u64_free(rack_tlp_retran);
3237 counter_u64_free(rack_tlp_retran_bytes);
3238 counter_u64_free(rack_to_tot);
3239 counter_u64_free(rack_saw_enobuf);
3240 counter_u64_free(rack_saw_enobuf_hw);
3241 counter_u64_free(rack_saw_enetunreach);
3242 counter_u64_free(rack_hot_alloc);
3243 counter_u64_free(rack_to_alloc);
3244 counter_u64_free(rack_to_alloc_hard);
3245 counter_u64_free(rack_to_alloc_emerg);
3246 counter_u64_free(rack_to_alloc_limited);
3247 counter_u64_free(rack_alloc_limited_conns);
3248 counter_u64_free(rack_split_limited);
3249 counter_u64_free(rack_multi_single_eq);
3250 counter_u64_free(rack_rxt_clamps_cwnd);
3251 counter_u64_free(rack_rxt_clamps_cwnd_uniq);
3252 counter_u64_free(rack_proc_non_comp_ack);
3253 counter_u64_free(rack_sack_proc_all);
3254 counter_u64_free(rack_sack_proc_restart);
3255 counter_u64_free(rack_sack_proc_short);
3256 counter_u64_free(rack_sack_skipped_acked);
3257 counter_u64_free(rack_sack_splits);
3258 counter_u64_free(rack_input_idle_reduces);
3259 counter_u64_free(rack_collapsed_win);
3260 counter_u64_free(rack_collapsed_win_rxt);
3261 counter_u64_free(rack_collapsed_win_rxt_bytes);
3262 counter_u64_free(rack_collapsed_win_seen);
3263 counter_u64_free(rack_try_scwnd);
3264 counter_u64_free(rack_persists_sends);
3265 counter_u64_free(rack_persists_acks);
3266 counter_u64_free(rack_persists_loss);
3267 counter_u64_free(rack_persists_lost_ends);
3269 counter_u64_free(rack_adjust_map_bw);
3271 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE);
3272 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE);
3275 static struct rack_sendmap *
3276 rack_alloc(struct tcp_rack *rack)
3278 struct rack_sendmap *rsm;
3281 * First get the top of the free list; in
3282 * theory it is the "hottest" rsm we have,
3283 * possibly just freed by ack processing.
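/*
 * Rough sketch of the allocation order, as read from the code
 * below (not a spec): 1) reuse a "hot" entry from the head of the
 * free list while more than rack_free_cache entries are cached,
 * 2) otherwise ask the UMA zone for a fresh entry, 3) as a last
 * resort dip into the few cached entries kept back as an
 * emergency reserve.
 */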
3285 if (rack->rc_free_cnt > rack_free_cache) {
3286 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
3287 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
3288 counter_u64_add(rack_hot_alloc, 1);
3289 rack->rc_free_cnt--;
3293 * Once we get under our free cache we probably
3294 * no longer have a "hot" one available, so fall back to the UMA zone.
3297 rsm = uma_zalloc(rack_zone, M_NOWAIT);
3299 rack->r_ctl.rc_num_maps_alloced++;
3300 counter_u64_add(rack_to_alloc, 1);
3304 * Dig in to our aux rsm's (the last two) since
3305 * UMA failed to get us one.
3307 if (rack->rc_free_cnt) {
3308 counter_u64_add(rack_to_alloc_emerg, 1);
3309 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
3310 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
3311 rack->rc_free_cnt--;
3317 static struct rack_sendmap *
3318 rack_alloc_full_limit(struct tcp_rack *rack)
3320 if ((V_tcp_map_entries_limit > 0) &&
3321 (rack->do_detection == 0) &&
3322 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
3323 counter_u64_add(rack_to_alloc_limited, 1);
3324 if (!rack->alloc_limit_reported) {
3325 rack->alloc_limit_reported = 1;
3326 counter_u64_add(rack_alloc_limited_conns, 1);
3330 return (rack_alloc(rack));
3333 /* wrapper to allocate a sendmap entry, subject to a specific limit */
3334 static struct rack_sendmap *
3335 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type)
3337 struct rack_sendmap *rsm;
3340 /* currently there is only one limit type */
3341 if (rack->r_ctl.rc_split_limit > 0 &&
3342 (rack->do_detection == 0) &&
3343 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) {
3344 counter_u64_add(rack_split_limited, 1);
3345 if (!rack->alloc_limit_reported) {
3346 rack->alloc_limit_reported = 1;
3347 counter_u64_add(rack_alloc_limited_conns, 1);
3350 #ifdef TCP_SAD_DETECTION
3351 } else if ((tcp_sad_limit != 0) &&
3352 (rack->do_detection == 1) &&
3353 (rack->r_ctl.rc_num_split_allocs >= tcp_sad_limit)) {
3354 counter_u64_add(rack_split_limited, 1);
3355 if (!rack->alloc_limit_reported) {
3356 rack->alloc_limit_reported = 1;
3357 counter_u64_add(rack_alloc_limited_conns, 1);
3364 /* allocate and mark in the limit type, if set */
3365 rsm = rack_alloc(rack);
3366 if (rsm != NULL && limit_type) {
3367 rsm->r_limit_type = limit_type;
3368 rack->r_ctl.rc_num_split_allocs++;
3374 rack_free_trim(struct tcp_rack *rack)
3376 struct rack_sendmap *rsm;
3379 * Free up all the tail entries until
3380 * we get our list down to the limit.
3382 while (rack->rc_free_cnt > rack_free_cache) {
3383 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head);
3384 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
3385 rack->rc_free_cnt--;
3386 rack->r_ctl.rc_num_maps_alloced--;
3387 uma_zfree(rack_zone, rsm);
3392 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
3394 if (rsm->r_flags & RACK_APP_LIMITED) {
3395 if (rack->r_ctl.rc_app_limited_cnt > 0) {
3396 rack->r_ctl.rc_app_limited_cnt--;
3399 if (rsm->r_limit_type) {
3400 /* currently there is only one limit type */
3401 rack->r_ctl.rc_num_split_allocs--;
3403 if (rsm == rack->r_ctl.rc_first_appl) {
3404 if (rack->r_ctl.rc_app_limited_cnt == 0)
3405 rack->r_ctl.rc_first_appl = NULL;
3407 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl);
3409 if (rsm == rack->r_ctl.rc_resend)
3410 rack->r_ctl.rc_resend = NULL;
3411 if (rsm == rack->r_ctl.rc_end_appl)
3412 rack->r_ctl.rc_end_appl = NULL;
3413 if (rack->r_ctl.rc_tlpsend == rsm)
3414 rack->r_ctl.rc_tlpsend = NULL;
3415 if (rack->r_ctl.rc_sacklast == rsm)
3416 rack->r_ctl.rc_sacklast = NULL;
3417 memset(rsm, 0, sizeof(struct rack_sendmap));
3418 /* Make sure we are not going to overrun our count limit of 0xff */
3419 if ((rack->rc_free_cnt + 1) > 0xff) {
3420 rack_free_trim(rack);
3422 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext);
3423 rack->rc_free_cnt++;
3427 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack)
3429 uint64_t srtt, bw, len, tim;
3430 uint32_t segsiz, def_len, minl;
3432 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
3433 def_len = rack_def_data_window * segsiz;
3434 if (rack->rc_gp_filled == 0) {
3436 * We have no measurement (IW is in flight?) so
3437 * we can only guess using our data_window sysctl
3438 * value (usually 20MSS).
3443 * Now we have a number of factors to consider.
3445 * 1) We have a desired BDP which is usually
3447 * 2) We have a minimum number of rtt's usually 1 SRTT
3448 * but we allow it to be more.
3449 * 3) We want to make sure a measurement lasts N useconds (if
3450 * we have set rack_min_measure_usec).
3452 * We handle the first concern here by trying to create a data
3453 * window of max(rack_def_data_window, DesiredBDP). The
3454 * second concern we handle in not letting the measurement
3455 * window end normally until at least the required SRTT's
3456 * have gone by which is done further below in
3457 * rack_enough_for_measurement(). Finally the third concern
3458 * we also handle here by calculating how long that time
3459 * would take at the current BW and then return the
3460 * max of our first calculation and that length. Note
3461 * that if rack_min_measure_usec is 0, we don't deal
3462 * with concern 3. Also for both Concern 1 and 3 an
3463 * application limited period could end the measurement
3466 * So lets calculate the BDP with the "known" b/w using
3467 * the SRTT as our rtt and then multiply it by the goal.
3470 bw = rack_get_bw(rack);
3471 srtt = (uint64_t)tp->t_srtt;
3473 len /= (uint64_t)HPTS_USEC_IN_SEC;
3474 len *= max(1, rack_goal_bdp);
3475 /* Now we need to round up to the nearest MSS */
3476 len = roundup(len, segsiz);
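/*
 * Illustration with assumed numbers (not defaults from this
 * file): bw = 12,500,000 bytes/sec (~100Mbps), srtt = 40,000 usec
 * and a goal of 2 BDPs give len = 12.5e6 * 40000 / 1e6 * 2 =
 * 1,000,000 bytes, rounded up to a segsiz boundary. That easily
 * exceeds a 20 * segsiz def_len (~29KB at 1448 byte segments), so
 * the BDP-derived length is what would be returned.
 */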
3477 if (rack_min_measure_usec) {
3478 /* Now calculate our min length for this b/w */
3479 tim = rack_min_measure_usec;
3480 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC;
3483 minl = roundup(minl, segsiz);
3488 * Now if we have a very small window we want
3489 * to attempt to get the window that is
3490 * as small as possible. This happens on
3491 * low b/w connections and we don't want to
3492 * span huge numbers of rtt's between measurements.
3494 * We basically include 2 over our "MIN window" so
3495 * that the measurement can be shortened (possibly) by
3499 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz)));
3501 return (max((uint32_t)len, def_len));
3506 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality)
3508 uint32_t tim, srtts, segsiz;
3511 * Has enough time passed for the GP measurement to be valid?
3513 if (SEQ_LT(th_ack, tp->gput_seq)) {
3514 /* Not enough bytes yet */
3517 if ((tp->snd_max == tp->snd_una) ||
3518 (th_ack == tp->snd_max)){
3520 * All is acked. The quality of an all-acked
3521 * measurement is usually low or medium, but we in theory could split
3522 * all acked into two cases, where you got
3523 * a significant amount of your window and
3524 * where you did not. For now we leave it
3525 * but it is something to contemplate in the
3526 * future. The danger here is that delayed ack
3527 * is affecting the last byte (which is a 50:50 chance).
3529 *quality = RACK_QUALITY_ALLACKED;
3532 if (SEQ_GEQ(th_ack, tp->gput_ack)) {
3534 * We obtained our entire window of data we wanted,
3535 * so no matter if we are in recovery or not
3536 * it is ok, since expanding the window does not
3537 * make things fuzzy (or at least not as much).
3539 *quality = RACK_QUALITY_HIGH;
3542 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
3543 if (SEQ_LT(th_ack, tp->gput_ack) &&
3544 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
3545 /* Not enough bytes yet */
3548 if (rack->r_ctl.rc_first_appl &&
3549 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) {
3551 * We are up to the app limited send point, so
3552 * we have to measure irrespective of the time.
3554 *quality = RACK_QUALITY_APPLIMITED;
3557 /* Now what about time? */
3558 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts);
3559 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts;
3560 if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) {
3562 * We do not allow a measurement if we are in recovery
3563 * that would shrink the goodput window we wanted.
3564 * This is to prevent cloudiness about when the last send
3565 * was actually made.
3567 *quality = RACK_QUALITY_HIGH;
3570 /* Nope not even a full SRTT has passed */
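/*
 * Summarizing the checks above as read from this function: below
 * gput_seq -> no; everything acked -> yes (ALLACKED quality);
 * reached gput_ack -> yes (HIGH); fewer bytes than an init-window
 * or MIN_GP_WIN worth -> no; at/past the first app-limited point
 * -> yes (APPLIMITED); otherwise yes only once rack_min_srtts
 * worth of gp_srtt has elapsed and we are not in recovery.
 */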
3575 rack_log_timely(struct tcp_rack *rack,
3576 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd,
3577 uint64_t up_bnd, int line, uint8_t method)
3579 if (tcp_bblogging_on(rack->rc_tp)) {
3580 union tcp_log_stackspecific log;
3583 memset(&log, 0, sizeof(log));
3584 log.u_bbr.flex1 = logged;
3585 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt;
3586 log.u_bbr.flex2 <<= 4;
3587 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt;
3588 log.u_bbr.flex2 <<= 4;
3589 log.u_bbr.flex2 |= rack->rc_gp_incr;
3590 log.u_bbr.flex2 <<= 4;
3591 log.u_bbr.flex2 |= rack->rc_gp_bwred;
3592 log.u_bbr.flex3 = rack->rc_gp_incr;
3593 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
3594 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca;
3595 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec;
3596 log.u_bbr.flex7 = rack->rc_gp_bwred;
3597 log.u_bbr.flex8 = method;
3598 log.u_bbr.cur_del_rate = cur_bw;
3599 log.u_bbr.delRate = low_bnd;
3600 log.u_bbr.bw_inuse = up_bnd;
3601 log.u_bbr.rttProp = rack_get_bw(rack);
3602 log.u_bbr.pkt_epoch = line;
3603 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
3604 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3605 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3606 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
3607 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
3608 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom;
3609 log.u_bbr.cwnd_gain <<= 1;
3610 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec;
3611 log.u_bbr.cwnd_gain <<= 1;
3612 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
3613 log.u_bbr.cwnd_gain <<= 1;
3614 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
3615 log.u_bbr.lost = rack->r_ctl.rc_loss_count;
3616 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3617 &rack->rc_inp->inp_socket->so_rcv,
3618 &rack->rc_inp->inp_socket->so_snd,
3620 0, &log, false, &tv);
3625 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult)
3628 * Before we increase we need to know if
3629 * the estimate just made was less than
3630 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est)
3632 * If we already are pacing at a fast enough
3633 * rate to push us faster there is no sense in increasing.
3636 * We first calculate our actual pacing rate (ss or ca multiplier
3637 * times our cur_bw).
3639 * Then we take the last measured rate and multiply by our
3640 * maximum pacing overage to give us a max allowable rate.
3642 * If our act_rate is smaller than our max_allowable rate
3643 * then we should increase. Else we should hold steady.
3646 uint64_t act_rate, max_allow_rate;
3648 if (rack_timely_no_stopping)
3651 if ((cur_bw == 0) || (last_bw_est == 0)) {
3653 * Initial startup case or
3654 * everything is acked case.
3656 rack_log_timely(rack, mult, cur_bw, 0, 0,
3662 * We can always pace at or slightly above our rate.
3664 rack_log_timely(rack, mult, cur_bw, 0, 0,
3668 act_rate = cur_bw * (uint64_t)mult;
3670 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100);
3671 max_allow_rate /= 100;
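/*
 * Example with assumed numbers: cur_bw = 10,000,000 B/s paced at
 * 120% gives act_rate = 12,000,000 B/s. With last_bw_est =
 * 11,000,000 B/s and a 10% allowed overage, max_allow_rate =
 * 12,100,000 B/s, so act_rate < max_allow_rate and we would let
 * the multiplier be raised.
 */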
3672 if (act_rate < max_allow_rate) {
3674 * Here the rate we are actually pacing at
3675 * is smaller than 10% above our last measurement.
3676 * This means we are pacing below what we would
3677 * like to try to achieve (plus some wiggle room).
3679 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
3684 * Here we are already pacing at least rack_max_per_above(10%)
3685 * what we are getting back. This indicates most likely
3686 * that we are being limited (cwnd/rwnd/app) and can't
3687 * get any more b/w. There is no sense in trying to
3688 * raise the pacing rate; it is not speeding us up
3689 * and we already are pacing faster than we are getting.
3691 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
3698 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack)
3701 * When we drag bottom, we want to ensure
3702 * that no multiplier is below 1.0; if one is,
3703 * we want to restore it to at least that.
3705 if (rack->r_ctl.rack_per_of_gp_rec < 100) {
3706 /* This is unlikely we usually do not touch recovery */
3707 rack->r_ctl.rack_per_of_gp_rec = 100;
3709 if (rack->r_ctl.rack_per_of_gp_ca < 100) {
3710 rack->r_ctl.rack_per_of_gp_ca = 100;
3712 if (rack->r_ctl.rack_per_of_gp_ss < 100) {
3713 rack->r_ctl.rack_per_of_gp_ss = 100;
3718 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack)
3720 if (rack->r_ctl.rack_per_of_gp_ca > 100) {
3721 rack->r_ctl.rack_per_of_gp_ca = 100;
3723 if (rack->r_ctl.rack_per_of_gp_ss > 100) {
3724 rack->r_ctl.rack_per_of_gp_ss = 100;
3729 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override)
3731 int32_t calc, logged, plus;
3737 * override is passed when we are
3738 * losing b/w and making one last
3739 * gasp at trying to not lose out
3740 * to a new-reno flow.
3744 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */
3745 if (rack->rc_gp_incr &&
3746 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) {
3748 * Reset and get 5 strokes more before the boost. Note
3749 * that the count is 0 based so we have to add one.
3752 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST;
3753 rack->rc_gp_timely_inc_cnt = 0;
3755 plus = (uint32_t)rack_gp_increase_per;
3756 /* Must be at least 1% increase for true timely increases */
3758 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0)))
3760 if (rack->rc_gp_saw_rec &&
3761 (rack->rc_gp_no_rec_chg == 0) &&
3762 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3763 rack->r_ctl.rack_per_of_gp_rec)) {
3764 /* We have been in recovery ding it too */
3765 calc = rack->r_ctl.rack_per_of_gp_rec + plus;
3769 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc;
3770 if (rack->r_ctl.rack_per_upper_bound_ca &&
3771 (rack->rc_dragged_bottom == 0) &&
3772 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca))
3773 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca;
3775 if (rack->rc_gp_saw_ca &&
3776 (rack->rc_gp_saw_ss == 0) &&
3777 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3778 rack->r_ctl.rack_per_of_gp_ca)) {
3780 calc = rack->r_ctl.rack_per_of_gp_ca + plus;
3784 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc;
3785 if (rack->r_ctl.rack_per_upper_bound_ca &&
3786 (rack->rc_dragged_bottom == 0) &&
3787 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca))
3788 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca;
3790 if (rack->rc_gp_saw_ss &&
3791 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3792 rack->r_ctl.rack_per_of_gp_ss)) {
3794 calc = rack->r_ctl.rack_per_of_gp_ss + plus;
3797 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc;
3798 if (rack->r_ctl.rack_per_upper_bound_ss &&
3799 (rack->rc_dragged_bottom == 0) &&
3800 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss))
3801 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss;
3805 (rack->rc_gp_incr == 0)){
3806 /* Go into increment mode */
3807 rack->rc_gp_incr = 1;
3808 rack->rc_gp_timely_inc_cnt = 0;
3810 if (rack->rc_gp_incr &&
3812 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) {
3813 rack->rc_gp_timely_inc_cnt++;
3815 rack_log_timely(rack, logged, plus, 0, 0,
3820 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff)
3823 * norm_grad = rtt_diff / minrtt;
3824 * new_per = curper * (1 - B * norm_grad)
3826 * B = rack_gp_decrease_per (default 10%)
3827 * rtt_diff = input var current rtt-diff
3828 * curper = input var current percentage
3829 * minrtt = from rack filter
3834 perf = (((uint64_t)curper * ((uint64_t)1000000 -
3835 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 *
3836 (((uint64_t)rtt_diff * (uint64_t)1000000)/
3837 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/
3838 (uint64_t)1000000)) /
3840 if (perf > curper) {
3844 return ((uint32_t)perf);
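/*
 * Worked example of the formula above (values chosen purely for
 * illustration): curper = 200, rtt_diff = 5,000 usec, min rtt =
 * 40,000 usec, B = 10%. norm_grad = 0.125, the cut is 1.25% and
 * the integer math yields perf = 197 (200 * (1 - 0.0125),
 * truncated).
 */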
3848 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt)
3852 * result = curper * (1 - (B * (1 - (highrttthresh / rtt))))
3855 * B = rack_gp_decrease_per (default 10%)
3856 * highrttthresh = filter_min * rack_gp_rtt_maxmul
3859 uint32_t highrttthresh;
3861 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
3863 perf = (((uint64_t)curper * ((uint64_t)1000000 -
3864 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 -
3865 ((uint64_t)highrttthresh * (uint64_t)1000000) /
3866 (uint64_t)rtt)) / 100)) /(uint64_t)1000000);
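/*
 * Worked example (values assumed): a 40,000 usec filtered min rtt
 * and rack_gp_rtt_maxmul = 3 give highrttthresh = 120,000 usec.
 * With rtt = 160,000 usec and B = 10%, 1 - 120000/160000 = 0.25,
 * so the cut is 2.5% and curper = 200 comes back as 195.
 */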
3871 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff)
3873 uint64_t logvar, logvar2, logvar3;
3874 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val;
3876 if (rack->rc_gp_incr) {
3877 /* Turn off increment counting */
3878 rack->rc_gp_incr = 0;
3879 rack->rc_gp_timely_inc_cnt = 0;
3881 ss_red = ca_red = rec_red = 0;
3883 /* Calculate the reduction value */
3887 /* Must be at least 1% reduction */
3888 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) {
3889 /* We have been in recovery ding it too */
3890 if (timely_says == 2) {
3891 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt);
3892 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
3898 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
3899 if (rack->r_ctl.rack_per_of_gp_rec > val) {
3900 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val);
3901 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val;
3903 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
3906 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec)
3907 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
3910 if (rack->rc_gp_saw_ss) {
3912 if (timely_says == 2) {
3913 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt);
3914 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
3920 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
3921 if (rack->r_ctl.rack_per_of_gp_ss > new_per) {
3922 ss_red = rack->r_ctl.rack_per_of_gp_ss - val;
3923 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val;
3926 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
3930 logvar2 = (uint32_t)rtt;
3932 logvar2 |= (uint32_t)rtt_diff;
3933 logvar3 = rack_gp_rtt_maxmul;
3935 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3936 rack_log_timely(rack, timely_says,
3938 logvar, __LINE__, 10);
3940 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss)
3941 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
3943 } else if (rack->rc_gp_saw_ca) {
3945 if (timely_says == 2) {
3946 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt);
3947 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
3953 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
3954 if (rack->r_ctl.rack_per_of_gp_ca > val) {
3955 ca_red = rack->r_ctl.rack_per_of_gp_ca - val;
3956 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val;
3958 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
3963 logvar2 = (uint32_t)rtt;
3965 logvar2 |= (uint32_t)rtt_diff;
3966 logvar3 = rack_gp_rtt_maxmul;
3968 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3969 rack_log_timely(rack, timely_says,
3971 logvar, __LINE__, 10);
3973 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca)
3974 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
3977 if (rack->rc_gp_timely_dec_cnt < 0x7) {
3978 rack->rc_gp_timely_dec_cnt++;
3979 if (rack_timely_dec_clear &&
3980 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear))
3981 rack->rc_gp_timely_dec_cnt = 0;
3986 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar,
3991 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts,
3992 uint32_t rtt, uint32_t line, uint8_t reas)
3994 if (tcp_bblogging_on(rack->rc_tp)) {
3995 union tcp_log_stackspecific log;
3998 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3999 log.u_bbr.flex1 = line;
4000 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts;
4001 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts;
4002 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
4003 log.u_bbr.flex5 = rtt;
4004 log.u_bbr.flex6 = rack->rc_highly_buffered;
4005 log.u_bbr.flex6 <<= 1;
4006 log.u_bbr.flex6 |= rack->forced_ack;
4007 log.u_bbr.flex6 <<= 1;
4008 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul;
4009 log.u_bbr.flex6 <<= 1;
4010 log.u_bbr.flex6 |= rack->in_probe_rtt;
4011 log.u_bbr.flex6 <<= 1;
4012 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt;
4013 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt;
4014 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca;
4015 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec;
4016 log.u_bbr.flex8 = reas;
4017 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
4018 log.u_bbr.delRate = rack_get_bw(rack);
4019 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt;
4020 log.u_bbr.cur_del_rate <<= 32;
4021 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt;
4022 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered;
4023 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
4024 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
4025 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
4026 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
4027 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts;
4028 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight;
4029 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
4030 log.u_bbr.rttProp = us_cts;
4031 log.u_bbr.rttProp <<= 32;
4032 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt;
4033 TCP_LOG_EVENTP(rack->rc_tp, NULL,
4034 &rack->rc_inp->inp_socket->so_rcv,
4035 &rack->rc_inp->inp_socket->so_snd,
4036 BBR_LOG_RTT_SHRINKS, 0,
4037 0, &log, false, &rack->r_ctl.act_rcv_time);
4042 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt)
4046 bwdp = rack_get_bw(rack);
4047 bwdp *= (uint64_t)rtt;
4048 bwdp /= (uint64_t)HPTS_USEC_IN_SEC;
4049 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz);
4050 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) {
4052 * A window protocol must be able to have 4 packets
4053 * outstanding as the floor in order to function
4054 * (especially considering delayed ack :D).
4056 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs);
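/*
 * Example (assumed values): bw = 1,250,000 B/s (~10Mbps) and a
 * 40,000 usec rtt give a bwdp of 50,000 bytes; with a 1448 byte
 * segsiz that rounds up to 50,680 bytes, well above the
 * rack_timely_min_segs floor.
 */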
4061 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts)
4064 * ProbeRTT is a bit different in rack_pacing than in
4065 * BBR. It is like BBR in that it uses the lowering of
4066 * the RTT as a signal that we saw something new and
4067 * counts from there for how long between. But it is
4068 * different in that it is quite simple. It does not
4069 * play with the cwnd and wait until we get down
4070 * to N segments outstanding and hold that for
4071 * 200ms. Instead it just sets the pacing reduction
4072 * rate to a set percentage (70 by default) and holds
4073 * that for a number of recent GP Srtt's.
4077 if (rack->rc_gp_dyn_mul == 0)
4080 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) {
4084 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
4085 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
4087 * Stop the goodput now, the idea here is
4088 * that future measurements with in_probe_rtt
4089 * won't register if they are not greater so
4090 * we want to get what info (if any) is available
4093 rack_do_goodput_measurement(rack->rc_tp, rack,
4094 rack->rc_tp->snd_una, __LINE__,
4095 RACK_QUALITY_PROBERTT);
4097 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
4098 rack->r_ctl.rc_time_probertt_entered = us_cts;
4099 segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
4100 rack->r_ctl.rc_pace_min_segs);
4101 rack->in_probe_rtt = 1;
4102 rack->measure_saw_probe_rtt = 1;
4103 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
4104 rack->r_ctl.rc_time_probertt_starts = 0;
4105 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt;
4106 if (rack_probertt_use_min_rtt_entry)
4107 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
4109 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt);
4110 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4111 __LINE__, RACK_RTTS_ENTERPROBE);
4115 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts)
4117 struct rack_sendmap *rsm;
4120 segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
4121 rack->r_ctl.rc_pace_min_segs);
4122 rack->in_probe_rtt = 0;
4123 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
4124 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
4126 * Stop the goodput now, the idea here is
4127 * that future measurements with in_probe_rtt
4128 * won't register if they are not greater so
4129 * we want to get what info (if any) is available
4132 rack_do_goodput_measurement(rack->rc_tp, rack,
4133 rack->rc_tp->snd_una, __LINE__,
4134 RACK_QUALITY_PROBERTT);
4135 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
4137 * We don't have enough data to make a measurement.
4138 * So lets just stop and start here after exiting
4139 * probe-rtt. We probably are not interested in
4140 * the results anyway.
4142 rack->rc_tp->t_flags &= ~TF_GPUTINPROG;
4145 * Measurements through the current snd_max are going
4146 * to be limited by the slower pacing rate.
4148 * We need to mark these as app-limited so we
4149 * don't collapse the b/w.
4151 rsm = tqhash_max(rack->r_ctl.tqh);
4152 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
4153 if (rack->r_ctl.rc_app_limited_cnt == 0)
4154 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
4157 * Go out to the end app limited and mark
4158 * this new one as next and move the end_appl up
4161 if (rack->r_ctl.rc_end_appl)
4162 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
4163 rack->r_ctl.rc_end_appl = rsm;
4165 rsm->r_flags |= RACK_APP_LIMITED;
4166 rack->r_ctl.rc_app_limited_cnt++;
4169 * Now, we need to examine our pacing rate multipliers.
4170 * If its under 100%, we need to kick it back up to
4171 * 100%. We also don't let it be over our "max" above
4172 * the actual rate i.e. 100% + rack_clamp_atexit_prtt.
4173 * Note setting clamp_atexit_prtt to 0 has the effect
4174 * of setting CA/SS to 100% always at exit (which is
4175 * the default behavior).
4177 if (rack_probertt_clear_is) {
4178 rack->rc_gp_incr = 0;
4179 rack->rc_gp_bwred = 0;
4180 rack->rc_gp_timely_inc_cnt = 0;
4181 rack->rc_gp_timely_dec_cnt = 0;
4183 /* Do we do any clamping at exit? */
4184 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) {
4185 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp;
4186 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp;
4188 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) {
4189 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt;
4190 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt;
4193 * Lets set rtt_diff to 0, so that we will get a "boost"
4196 rack->r_ctl.rc_rtt_diff = 0;
4198 /* Clear all flags so we start fresh */
4199 rack->rc_tp->t_bytes_acked = 0;
4200 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND;
4202 * If configured to, set the cwnd and ssthresh to
4205 if (rack_probe_rtt_sets_cwnd) {
4209 /* Set ssthresh so we get into CA once we hit our target */
4210 if (rack_probertt_use_min_rtt_exit == 1) {
4211 /* Set to min rtt */
4212 rack_set_prtt_target(rack, segsiz,
4213 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
4214 } else if (rack_probertt_use_min_rtt_exit == 2) {
4215 /* Set to current gp rtt */
4216 rack_set_prtt_target(rack, segsiz,
4217 rack->r_ctl.rc_gp_srtt);
4218 } else if (rack_probertt_use_min_rtt_exit == 3) {
4219 /* Set to entry gp rtt */
4220 rack_set_prtt_target(rack, segsiz,
4221 rack->r_ctl.rc_entry_gp_rtt);
4226 sum = rack->r_ctl.rc_entry_gp_rtt;
4228 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt));
4231 * A highly buffered path needs
4232 * cwnd space for timely to work.
4233 * Lets set things up as if
4234 * we are heading back here again.
4236 setval = rack->r_ctl.rc_entry_gp_rtt;
4237 } else if (sum >= 15) {
4239 * Lets take the smaller of the
4240 * two since we are just somewhat
4243 setval = rack->r_ctl.rc_gp_srtt;
4244 if (setval > rack->r_ctl.rc_entry_gp_rtt)
4245 setval = rack->r_ctl.rc_entry_gp_rtt;
4248 * Here we are not highly buffered
4249 * and should pick the min we can to
4250 * keep from causing loss.
4252 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
4254 rack_set_prtt_target(rack, segsiz,
4257 if (rack_probe_rtt_sets_cwnd > 1) {
4258 /* There is a percentage here to boost */
4259 ebdp = rack->r_ctl.rc_target_probertt_flight;
4260 ebdp *= rack_probe_rtt_sets_cwnd;
4262 setto = rack->r_ctl.rc_target_probertt_flight + ebdp;
4264 setto = rack->r_ctl.rc_target_probertt_flight;
4265 rack->rc_tp->snd_cwnd = roundup(setto, segsiz);
4266 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) {
4268 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs;
4270 /* If we set in the cwnd also set the ssthresh point so we are in CA */
4271 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1);
4273 rack_log_rtt_shrinks(rack, us_cts,
4274 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4275 __LINE__, RACK_RTTS_EXITPROBE);
4276 /* Clear times last so log has all the info */
4277 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max;
4278 rack->r_ctl.rc_time_probertt_entered = us_cts;
4279 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
4280 rack->r_ctl.rc_time_of_last_probertt = us_cts;
4284 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts)
4286 /* Check in on probe-rtt */
4287 if (rack->rc_gp_filled == 0) {
4288 /* We do not do p-rtt unless we have gp measurements */
4291 if (rack->in_probe_rtt) {
4292 uint64_t no_overflow;
4293 uint32_t endtime, must_stay;
4295 if (rack->r_ctl.rc_went_idle_time &&
4296 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) {
4298 * We went idle during prtt, just exit now.
4300 rack_exit_probertt(rack, us_cts);
4301 } else if (rack_probe_rtt_safety_val &&
4302 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) &&
4303 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) {
4305 * Probe RTT safety value triggered!
4307 rack_log_rtt_shrinks(rack, us_cts,
4308 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4309 __LINE__, RACK_RTTS_SAFETY);
4310 rack_exit_probertt(rack, us_cts);
4312 /* Calculate the max we will wait */
4313 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait);
4314 if (rack->rc_highly_buffered)
4315 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp);
4316 /* Calculate the min we must wait */
4317 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain);
4318 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) &&
4319 TSTMP_LT(us_cts, endtime)) {
4321 /* Do we lower more? */
4323 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered))
4324 calc = us_cts - rack->r_ctl.rc_time_probertt_entered;
4327 calc /= max(rack->r_ctl.rc_gp_srtt, 1);
4330 calc *= rack_per_of_gp_probertt_reduce;
4331 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc;
4333 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh)
4334 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh;
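/*
 * Illustration with assumed values (defaults may differ): if
 * rack_per_of_gp_probertt is 70, the reduce step is 10 and the
 * low threshold is 40, then one gp_srtt after entering probe-rtt
 * we pace at 60%, after two at 50%, and we never drop below 40%
 * while waiting for the flight to drain.
 */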
4336 /* We must reach target or the time set */
4339 if (rack->r_ctl.rc_time_probertt_starts == 0) {
4340 if ((TSTMP_LT(us_cts, must_stay) &&
4341 rack->rc_highly_buffered) ||
4342 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) >
4343 rack->r_ctl.rc_target_probertt_flight)) {
4344 /* We are not past the must_stay time */
4347 rack_log_rtt_shrinks(rack, us_cts,
4348 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4349 __LINE__, RACK_RTTS_REACHTARGET);
4350 rack->r_ctl.rc_time_probertt_starts = us_cts;
4351 if (rack->r_ctl.rc_time_probertt_starts == 0)
4352 rack->r_ctl.rc_time_probertt_starts = 1;
4353 /* Restore back to our rate we want to pace at in prtt */
4354 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
4357 * Setup our end time, some number of gp_srtts plus 200ms.
4359 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt *
4360 (uint64_t)rack_probertt_gpsrtt_cnt_mul);
4361 if (rack_probertt_gpsrtt_cnt_div)
4362 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div);
4365 endtime += rack_min_probertt_hold;
4366 endtime += rack->r_ctl.rc_time_probertt_starts;
4367 if (TSTMP_GEQ(us_cts, endtime)) {
4368 /* yes, exit probertt */
4369 rack_exit_probertt(rack, us_cts);
4372 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) {
4373 /* Go into probertt, its been too long since we went lower */
4374 rack_enter_probertt(rack, us_cts);
4379 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est,
4380 uint32_t rtt, int32_t rtt_diff)
4382 uint64_t cur_bw, up_bnd, low_bnd, subfr;
4385 if ((rack->rc_gp_dyn_mul == 0) ||
4386 (rack->use_fixed_rate) ||
4387 (rack->in_probe_rtt) ||
4388 (rack->rc_always_pace == 0)) {
4389 /* No dynamic GP multiplier in play */
4392 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start;
4393 cur_bw = rack_get_bw(rack);
4394 /* Calculate our up and down range */
4395 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up;
4397 up_bnd += rack->r_ctl.last_gp_comp_bw;
4399 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down;
4401 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr;
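/*
 * Example with assumed settings: last_gp_comp_bw = 10,000,000 B/s
 * with a 2% up multiplier and a 4% down multiplier gives up_bnd =
 * 10,200,000 and low_bnd = 9,600,000 B/s. A new estimate above
 * up_bnd is treated as still gaining, one below low_bnd as losing
 * ground, and anything in between defers to the timely (rtt
 * based) signal handled below.
 */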
4402 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) {
4404 * This is the case where our RTT is above
4405 * the max target and we have been configured
4406 * to just do timely no bonus up stuff in that case.
4408 * There are two configurations, set to 1, and we
4409 * just do timely if we are over our max. If its
4410 * set above 1 then we slam the multipliers down
4411 * to 100 and then decrement per timely.
4413 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4415 if (rack->r_ctl.rc_no_push_at_mrtt > 1)
4416 rack_validate_multipliers_at_or_below_100(rack);
4417 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
4418 } else if ((timely_says != 0) && (last_bw_est < low_bnd) && !losses) {
4420 * We are decreasing; this is a bit complicated. It
4421 * means we are losing ground. This could be
4422 * because another flow entered and we are competing
4423 * for b/w with it. This will push the RTT up which
4424 * makes timely unusable unless we want to get shoved
4425 * into a corner and just be backed off (the age
4426 * old problem with delay based CC).
4428 * On the other hand if it was a route change we
4429 * would like to stay somewhat contained and not
4430 * blow out the buffers.
4432 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4434 rack->r_ctl.last_gp_comp_bw = cur_bw;
4435 if (rack->rc_gp_bwred == 0) {
4436 /* Go into reduction counting */
4437 rack->rc_gp_bwred = 1;
4438 rack->rc_gp_timely_dec_cnt = 0;
4440 if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) {
4442 * Push another time with a faster pacing
4443 * to try to gain back (we include override to
4444 * get a full raise factor).
4446 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) ||
4447 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) ||
4448 (timely_says == 0) ||
4449 (rack_down_raise_thresh == 0)) {
4451 * Do an override up in b/w if we were
4452 * below the threshold or if the threshold
4453 * is zero we always do the raise.
4455 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1);
4457 /* Log it stays the same */
4458 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0,
4461 rack->rc_gp_timely_dec_cnt++;
4462 /* We are not incrementing really no-count */
4463 rack->rc_gp_incr = 0;
4464 rack->rc_gp_timely_inc_cnt = 0;
4467 * Lets just use the RTT
4468 * information and give up
4473 } else if ((timely_says != 2) &&
4475 (last_bw_est > up_bnd)) {
4477 * We are increasing b/w lets keep going, updating
4478 * our b/w and ignoring any timely input, unless
4479 * of course we are at our max raise (if there is one).
4482 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4484 rack->r_ctl.last_gp_comp_bw = cur_bw;
4485 if (rack->rc_gp_saw_ss &&
4486 rack->r_ctl.rack_per_upper_bound_ss &&
4487 (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) {
4489 * In cases where we can't go higher
4490 * we should just use timely.
4494 if (rack->rc_gp_saw_ca &&
4495 rack->r_ctl.rack_per_upper_bound_ca &&
4496 (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) {
4498 * In cases where we can't go higher
4499 * we should just use timely.
4503 rack->rc_gp_bwred = 0;
4504 rack->rc_gp_timely_dec_cnt = 0;
4505 /* You get a set number of pushes if timely is trying to reduce */
4506 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) {
4507 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4509 /* Log it stays the same */
4510 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0,
4516 * We are staying between the lower and upper range bounds
4517 * so use timely to decide.
4519 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4523 rack->rc_gp_incr = 0;
4524 rack->rc_gp_timely_inc_cnt = 0;
4525 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) &&
4527 (last_bw_est < low_bnd)) {
4528 /* We are losing ground */
4529 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4530 rack->rc_gp_timely_dec_cnt++;
4531 /* We are not incrementing really no-count */
4532 rack->rc_gp_incr = 0;
4533 rack->rc_gp_timely_inc_cnt = 0;
4535 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
4537 rack->rc_gp_bwred = 0;
4538 rack->rc_gp_timely_dec_cnt = 0;
4539 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4545 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt)
4547 int32_t timely_says;
4548 uint64_t log_mult, log_rtt_a_diff;
4550 log_rtt_a_diff = rtt;
4551 log_rtt_a_diff <<= 32;
4552 log_rtt_a_diff |= (uint32_t)rtt_diff;
4553 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) *
4554 rack_gp_rtt_maxmul)) {
4555 /* Reduce the b/w multiplier */
4557 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
4559 log_mult |= prev_rtt;
4560 rack_log_timely(rack, timely_says, log_mult,
4561 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4562 log_rtt_a_diff, __LINE__, 4);
4563 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
4564 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
4565 max(rack_gp_rtt_mindiv , 1)))) {
4566 /* Increase the b/w multiplier */
4567 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
4568 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
4569 max(rack_gp_rtt_mindiv , 1));
4571 log_mult |= prev_rtt;
4573 rack_log_timely(rack, timely_says, log_mult ,
4574 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4575 log_rtt_a_diff, __LINE__, 5);
4578 * Use a gradient, the timely gradient, to decide:
4580 * grad = rc_rtt_diff / min_rtt;
4582 * anything below or equal to 0 will be
4583 * an increase indication. Anything above
4584 * zero is a decrease. Note we take care
4585 * of the actual gradient calculation
4586 * in the reduction (it is not needed for the increase).
4589 log_mult = prev_rtt;
4590 if (rtt_diff <= 0) {
4592 * Rttdiff is less than zero, increase the
4593 * b/w multiplier (it is 0 or negative)
4596 rack_log_timely(rack, timely_says, log_mult,
4597 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6);
4599 /* Reduce the b/w multiplier */
4601 rack_log_timely(rack, timely_says, log_mult,
4602 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7);
4605 return (timely_says);
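/*
 * Putting assumed numbers on the thresholds above (defaults may
 * differ): with rack_gp_rtt_maxmul = 3, rack_gp_rtt_minmul = 1,
 * rack_gp_rtt_mindiv = 4 and a 40ms filtered min rtt, we say
 * decrease once the gp srtt reaches 120ms, increase while it
 * stays at or below 50ms, and in between the sign of rc_rtt_diff
 * decides (<= 0 increase, > 0 decrease).
 */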
4609 rack_in_gp_window(struct tcpcb *tp, struct rack_sendmap *rsm)
4611 if (SEQ_GEQ(rsm->r_start, tp->gput_seq) &&
4612 SEQ_LEQ(rsm->r_end, tp->gput_ack)) {
4614 * This covers the case that the
4615 * resent is completely inside
4616 * the gp range or up to it.
4617 * |----------------|
4623 } else if (SEQ_LT(rsm->r_start, tp->gput_seq) &&
4624 SEQ_GT(rsm->r_end, tp->gput_seq)){
4626 * This covers the case of
4631 } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) &&
4632 SEQ_LT(rsm->r_start, tp->gput_ack) &&
4633 SEQ_GEQ(rsm->r_end, tp->gput_ack)) {
4636 * This covers the case of
4645 static __inline void
4646 rack_mark_in_gp_win(struct tcpcb *tp, struct rack_sendmap *rsm)
4649 if ((tp->t_flags & TF_GPUTINPROG) == 0)
4652 * We have a Goodput measurement in progress. Mark
4653 * the send if it is within the window. If it is not
4654 * in the window make sure it does not have the mark.
4656 if (rack_in_gp_window(tp, rsm))
4657 rsm->r_flags |= RACK_IN_GP_WIN;
4659 rsm->r_flags &= ~RACK_IN_GP_WIN;
4662 static __inline void
4663 rack_clear_gp_marks(struct tcpcb *tp, struct tcp_rack *rack)
4665 /* A GP measurement is ending, clear all marks on the send map*/
4666 struct rack_sendmap *rsm = NULL;
4668 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
4670 rsm = tqhash_min(rack->r_ctl.tqh);
4673 while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){
4674 rsm->r_flags &= ~RACK_IN_GP_WIN;
4675 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
4680 static __inline void
4681 rack_tend_gp_marks(struct tcpcb *tp, struct tcp_rack *rack)
4683 struct rack_sendmap *rsm = NULL;
4685 if (tp->snd_una == tp->snd_max) {
4686 /* Nothing outstanding yet, nothing to do here */
4689 if (SEQ_GT(tp->gput_seq, tp->snd_una)) {
4691 * We are measuring ahead of some outstanding
4692 * data. We need to walk forward until we get
4693 * to gput_seq, marking as we go, so that no rsm is set incorrectly
4694 * with RACK_IN_GP_WIN.
4696 rsm = tqhash_min(rack->r_ctl.tqh);
4697 while (rsm != NULL) {
4698 rack_mark_in_gp_win(tp, rsm);
4699 if (SEQ_GEQ(rsm->r_end, tp->gput_seq))
4701 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
4706 * Need to find the GP seq, if rsm is
4707 * set we stopped as we hit it.
4709 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
4712 rack_mark_in_gp_win(tp, rsm);
4715 * Now we may need to mark already sent rsm, ahead of
4716 * gput_seq in the window since they may have been sent
4717 * *before* we started our measurement. The rsm, if non-null
4718 * has been marked (note if rsm would have been NULL we would have
4719 * returned in the previous block). So we go to the next, and continue
4720 * until we run out of entries or we exceed the gp_ack value.
4722 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
4724 rack_mark_in_gp_win(tp, rsm);
4725 if (SEQ_GT(rsm->r_end, tp->gput_ack))
4727 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
4732 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
4733 tcp_seq th_ack, int line, uint8_t quality)
4735 uint64_t tim, bytes_ps, stim, utim;
4736 uint32_t segsiz, bytes, reqbytes, us_cts;
4737 int32_t gput, new_rtt_diff, timely_says;
4738 uint64_t resid_bw, subpart = 0, addpart = 0, srtt;
4741 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
4742 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
4743 if (TSTMP_GEQ(us_cts, tp->gput_ts))
4744 tim = us_cts - tp->gput_ts;
4747 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts)
4748 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts;
4752 * Use the larger of the send time or ack time. This prevents us
4753 * from being influenced by ack artifacts to come up with too
4754 * high a measurement. Note that since we are spanning over many more
4755 * bytes in most of our measurements hopefully that is less likely to
4761 utim = max(stim, 1);
4762 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz));
4763 rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL);
4764 if ((tim == 0) && (stim == 0)) {
4766 * Invalid measurement time, maybe
4767 * all on one ack/one send?
4771 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4772 0, 0, 0, 10, __LINE__, NULL, quality);
4773 goto skip_measurement;
4775 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) {
4776 /* We never made a us_rtt measurement? */
4779 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4780 0, 0, 0, 10, __LINE__, NULL, quality);
4781 goto skip_measurement;
4784 * Calculate the maximum possible b/w this connection
4785 * could have. We base our calculation on the lowest
4786 * rtt we have seen during the measurement and the
4787 * largest rwnd the client has given us in that time. This
4788 * forms a BDP that is the maximum that we could ever
4789 * get to the client. Anything larger is not valid.
4791 * I originally had code here that rejected measurements
4792 * where the time was less than 1/2 the latest us_rtt.
4793 * But after thinking on that I realized it is wrong since
4794 * say you had a 150Mbps or even 1Gbps link, and you
4795 * were a long way away. For example, I am in Europe (100ms rtt)
4796 * talking to my 1Gbps link in S.C. Now measuring say 150,000
4797 * bytes my time would be 1.2ms, and yet my rtt would say
4798 * the measurement was invalid because the time was < 50ms. The
4799 * same thing is true for 150Mb (8ms of time).
4801 * A better way I realized is to look at what the maximum
4802 * the connection could possibly do. This is gated on
4803 * the lowest RTT we have seen and the highest rwnd.
4804 * We should in theory never exceed that; if we do,
4805 * then something on the path is storing up packets
4806 * and then feeding them all at once to our endpoint
4807 * messing up our measurement.
4809 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd;
4810 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC;
4811 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt;
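/*
 * Example of the cap (numbers assumed): if the largest rwnd seen
 * in the measurement window was 1,000,000 bytes and the lowest
 * us_rtt was 10,000 usec, last_max_bw = 1e6 * 1e6 / 1e4 =
 * 100,000,000 B/s (~800Mbps). Any goodput sample above that is
 * treated as an artifact and clamped down later.
 */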
4812 if (SEQ_LT(th_ack, tp->gput_seq)) {
4813 /* No measurement can be made */
4816 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4817 0, 0, 0, 10, __LINE__, NULL, quality);
4818 goto skip_measurement;
4820 bytes = (th_ack - tp->gput_seq);
4821 bytes_ps = (uint64_t)bytes;
4823 * Don't measure a b/w for pacing unless we have gotten at least
4824 * an initial window's worth of data in this measurement interval.
4826 * Small numbers of bytes get badly influenced by delayed ack and
4827 * other artifacts. Note we take the initial window or our
4828 * defined minimum GP (defaulting to 10 which hopefully is the
4831 if (rack->rc_gp_filled == 0) {
4833 * The initial estimate is special. We
4834 * have blasted out an IW worth of packets
4835 * without real valid ack ts results. We
4836 * then setup the app_limited_needs_set flag,
4837 * this should get the first ack in (probably 2
4838 * MSS worth) to be recorded as the timestamp.
4839 * We thus allow a smaller number of bytes i.e.
4842 reqbytes -= (2 * segsiz);
4843 /* Also lets fill previous for our first measurement to be neutral */
4844 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
4846 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) {
4847 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4848 rack->r_ctl.rc_app_limited_cnt,
4849 0, 0, 10, __LINE__, NULL, quality);
4850 goto skip_measurement;
4853 * We now need to calculate the Timely-like status so
4854 * we can update (possibly) the b/w multipliers.
4856 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt;
4857 if (rack->rc_gp_filled == 0) {
4858 /* No previous reading */
4859 rack->r_ctl.rc_rtt_diff = new_rtt_diff;
4861 if (rack->measure_saw_probe_rtt == 0) {
4863 * We don't want a probertt to be counted
4864 * since it will incorrectly be negative. We
4865 * expect to be reducing the RTT when we
4866 * pace at a slower rate.
4868 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8);
4869 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8);
4872 timely_says = rack_make_timely_judgement(rack,
4873 rack->r_ctl.rc_gp_srtt,
4874 rack->r_ctl.rc_rtt_diff,
4875 rack->r_ctl.rc_prev_gp_srtt
4877 bytes_ps *= HPTS_USEC_IN_SEC;
4879 if (bytes_ps > rack->r_ctl.last_max_bw) {
4881 * Something on the path is playing games,
4882 * since this b/w is not possible based
4883 * on our BDP (highest rwnd and lowest rtt
4884 * we saw in the measurement window).
4886 * Another option here would be to
4887 * instead skip the measurement.
4889 rack_log_pacing_delay_calc(rack, bytes, reqbytes,
4890 bytes_ps, rack->r_ctl.last_max_bw, 0,
4891 11, __LINE__, NULL, quality);
4892 bytes_ps = rack->r_ctl.last_max_bw;
4894 /* We store gp for b/w in bytes per second */
4895 if (rack->rc_gp_filled == 0) {
4896 /* Initial measurement */
4898 rack->r_ctl.gp_bw = bytes_ps;
4899 rack->rc_gp_filled = 1;
4900 rack->r_ctl.num_measurements = 1;
4901 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
4903 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4904 rack->r_ctl.rc_app_limited_cnt,
4905 0, 0, 10, __LINE__, NULL, quality);
4907 if (tcp_in_hpts(rack->rc_inp) &&
4908 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
4910 * Ok we can't trust the pacer in this case
4911 * where we transition from un-paced to paced.
4912 * Or for that matter when the burst mitigation
4913 * was making a wild guess and got it wrong.
4914 * Stop the pacer and clear up all the aggregate
4917 tcp_hpts_remove(rack->rc_inp);
4918 rack->r_ctl.rc_hpts_flags = 0;
4919 rack->r_ctl.rc_last_output_to = 0;
4922 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) {
4923 /* Still a small number run an average */
4924 rack->r_ctl.gp_bw += bytes_ps;
4925 addpart = rack->r_ctl.num_measurements;
4926 rack->r_ctl.num_measurements++;
4927 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
4928 /* We have collected enough to move forward */
4929 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements;
4931 rack_set_pace_segments(tp, rack, __LINE__, NULL);
4935 * We want to take 1/wma of the goodput and add it in to 7/8ths
4936 * of the old value weighted by the srtt. So if your measurement
4937 * period is say 2 SRTT's long you would get 1/4 as the
4938 * value, if it was like 1/2 SRTT then you would get 1/16th.
4940 * But we must be careful not to take too much i.e. if the
4941 * srtt is say 20ms and the measurement is taken over
4942 * 400ms our weight would be 400/20 i.e. 20. On the
4943 * other hand if we get a measurement over 1ms with a
4944 * 10ms rtt we only want to take a much smaller portion.
4946 if (rack->r_ctl.num_measurements < 0xff) {
4947 rack->r_ctl.num_measurements++;
4949 srtt = (uint64_t)tp->t_srtt;
4952 * Strange why did t_srtt go back to zero?
4954 if (rack->r_ctl.rc_rack_min_rtt)
4955 srtt = rack->r_ctl.rc_rack_min_rtt;
4957 srtt = HPTS_USEC_IN_MSEC;
4960 * XXXrrs: Note for reviewers, in playing with
4961 * dynamic pacing I discovered this GP calculation
4962 * as done originally leads to some undesired results.
4963 * Basically you can get longer measurements contributing
4964 * too much to the WMA. Thus I changed it so that, if you are
4965 * doing dynamic adjustments, we only do the apportioned adjustment
4966 * if we have a very small (time wise) measurement. Longer
4967 * measurements just get their weight (defaulting to 1/8)
4968 * added to the WMA. We may want to think about changing
4969 * this to always do that for both sides i.e. dynamic
4970 * and non-dynamic... but considering lots of folks
4971 * were playing with this I did not want to change the
4972 * calculation per se without your thoughts.. Lawrence?
4975 if (rack->rc_gp_dyn_mul == 0) {
4976 subpart = rack->r_ctl.gp_bw * utim;
4977 subpart /= (srtt * 8);
4978 if (subpart < (rack->r_ctl.gp_bw / 2)) {
4980 * The b/w update takes no more
4981 * away than 1/2 our running total
4984 addpart = bytes_ps * utim;
4985 addpart /= (srtt * 8);
4988 * Don't allow a single measurement
4989 * to account for more than 1/2 of the
4990 * WMA. This could happen on a retransmission
4991 * where utim becomes huge compared to
4992 * srtt (multiple retransmissions when using
4993 * the sending rate which factors in all the
4994 * transmissions from the first one).
4996 subpart = rack->r_ctl.gp_bw / 2;
4997 addpart = bytes_ps / 2;
4999 resid_bw = rack->r_ctl.gp_bw - subpart;
5000 rack->r_ctl.gp_bw = resid_bw + addpart;
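/*
 * Worked example (editorial sketch based on the comment above): with
 * srtt = 20ms and a measurement spanning utim = 40ms (2 SRTTs),
 * subpart = gp_bw * 40 / (20 * 8) = gp_bw / 4 and likewise
 * addpart = bytes_ps / 4, so the new gp_bw is 3/4 of the old average
 * plus 1/4 of this measurement. The 1/2 clamp above keeps a single
 * huge utim from dominating the WMA.
 */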
5003 if ((utim / srtt) <= 1) {
5005 * The b/w update was over a small period
5006 * of time. The idea here is to prevent a small
5007 * measurement time period from counting
5008 * too much. So we scale it based on the
5009 * time so it contributes less than 1/rack_wma_divisor
5010 * of its measurement.
5012 subpart = rack->r_ctl.gp_bw * utim;
5013 subpart /= (srtt * rack_wma_divisor);
5014 addpart = bytes_ps * utim;
5015 addpart /= (srtt * rack_wma_divisor);
5018 * The scaled measurement was long
5019 * enough so let's just add in the
5020 * portion of the measurement i.e. 1/rack_wma_divisor
5022 subpart = rack->r_ctl.gp_bw / rack_wma_divisor;
5023 addpart = bytes_ps / rack_wma_divisor;
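/*
 * Worked example (editorial sketch, assuming the default
 * rack_wma_divisor of 8): a measurement spanning half an srtt takes
 * the scaled branch above and contributes gp_bw/16 out and
 * bytes_ps/16 in (the 1/16th case from the earlier comment), while a
 * measurement of two srtts or more lands here and contributes the
 * flat 1/8th.
 */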
5025 if ((rack->measure_saw_probe_rtt == 0) ||
5026 (bytes_ps > rack->r_ctl.gp_bw)) {
5028 * For probe-rtt we only add it in
5029 * if it's larger, all others we just
5033 resid_bw = rack->r_ctl.gp_bw - subpart;
5034 rack->r_ctl.gp_bw = resid_bw + addpart;
5037 rack_set_pace_segments(tp, rack, __LINE__, NULL);
5039 if ((rack->gp_ready == 0) &&
5040 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
5041 /* We have enough measurements now */
5043 if ((rack->rc_always_pace && (rack->use_fixed_rate == 0)) ||
5045 rack_set_cc_pacing(rack);
5046 if (rack->defer_options)
5047 rack_apply_deferred_options(rack);
5049 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim,
5050 rack_get_bw(rack), 22, did_add, NULL, quality);
5051 /* We do not update any multipliers if we are in or have seen a probe-rtt */
5052 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set)
5053 rack_update_multiplier(rack, timely_says, bytes_ps,
5054 rack->r_ctl.rc_gp_srtt,
5055 rack->r_ctl.rc_rtt_diff);
5056 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim,
5057 rack_get_bw(rack), 3, line, NULL, quality);
5058 rack_log_pacing_delay_calc(rack,
5061 bytes_ps, /* bw_inuse */
5062 rack->r_ctl.gp_bw, /* delRate */
5063 rack_get_lt_bw(rack), /* rttProp */
5065 /* reset the gp srtt and setup the new prev */
5066 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
5067 /* Record the lost count for the next measurement */
5068 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count;
5071 * We restart our diffs based on the gpsrtt in the
5072 * measurement window.
5074 rack->rc_gp_rtt_set = 0;
5075 rack->rc_gp_saw_rec = 0;
5076 rack->rc_gp_saw_ca = 0;
5077 rack->rc_gp_saw_ss = 0;
5078 rack->rc_dragged_bottom = 0;
5080 if (quality == RACK_QUALITY_HIGH) {
5082 * Gput in the stats world is in kbps where bytes_ps is
5083 * bytes per second so we do ((x * 8)/ 1000).
5085 gput = (int32_t)((bytes_ps << 3) / (uint64_t)1000);
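/*
 * Example (editorial note): a bytes_ps of 1,250,000 bytes/sec works
 * out to (1,250,000 * 8) / 1000 = 10,000 kbps for the stats export.
 */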
5087 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
5090 * XXXLAS: This is a temporary hack, and should be
5091 * chained off VOI_TCP_GPUT when stats(9) grows an
5092 * API to deal with chained VOIs.
5094 if (tp->t_stats_gput_prev > 0)
5095 stats_voi_update_abs_s32(tp->t_stats,
5097 ((gput - tp->t_stats_gput_prev) * 100) /
5098 tp->t_stats_gput_prev);
5100 tp->t_stats_gput_prev = gput;
5102 tp->t_flags &= ~TF_GPUTINPROG;
5104 * Are we app limited now and is there space from where we
5105 * were to where we want to go?
5107 * We don't do the other case i.e. non-applimited here since
5108 * the next send will trigger us picking up the missing data.
5110 if (rack->r_ctl.rc_first_appl &&
5111 TCPS_HAVEESTABLISHED(tp->t_state) &&
5112 rack->r_ctl.rc_app_limited_cnt &&
5113 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) &&
5114 ((rack->r_ctl.rc_first_appl->r_end - th_ack) >
5115 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
5117 * Yep there is enough outstanding to make a measurement here.
5119 struct rack_sendmap *rsm;
5121 rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
5122 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
5123 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
5124 rack->app_limited_needs_set = 0;
5125 tp->gput_seq = th_ack;
5126 if (rack->in_probe_rtt)
5127 rack->measure_saw_probe_rtt = 1;
5128 else if ((rack->measure_saw_probe_rtt) &&
5129 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
5130 rack->measure_saw_probe_rtt = 0;
5131 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) {
5132 /* There is a full window to gain info from */
5133 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
5135 /* We can only measure up to the applimited point */
5136 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack);
5137 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
5139 * We don't have enough to make a measurement.
5141 tp->t_flags &= ~TF_GPUTINPROG;
5142 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq,
5143 0, 0, 0, 6, __LINE__, NULL, quality);
5147 if (tp->t_state >= TCPS_FIN_WAIT_1) {
5149 * We will get no more data into the SB;
5150 * this means we need to have the data available
5151 * before we start a measurement.
5153 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) {
5154 /* Nope not enough data. */
5158 tp->t_flags |= TF_GPUTINPROG;
5160 * Now we need to find the timestamp of the send at tp->gput_seq
5161 * for the send based measurement.
5163 rack->r_ctl.rc_gp_cumack_ts = 0;
5164 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
5166 /* Ok send-based limit is set */
5167 if (SEQ_LT(rsm->r_start, tp->gput_seq)) {
5169 * Move back to include the earlier part
5170 * so our ack time lines up right (this may
5171 * make an overlapping measurement but that's
5174 tp->gput_seq = rsm->r_start;
5176 if (rsm->r_flags & RACK_ACKED) {
5177 struct rack_sendmap *nrsm;
5179 tp->gput_ts = (uint32_t)rsm->r_ack_arrival;
5180 tp->gput_seq = rsm->r_end;
5181 nrsm = tqhash_next(rack->r_ctl.tqh, rsm);
5185 rack->app_limited_needs_set = 1;
5188 rack->app_limited_needs_set = 1;
5189 /* We always go from the first send */
5190 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0];
5193 * If we don't find the rsm due to some
5194 * send-limit, set the current time, which
5195 * basically disables the send-limit.
5200 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
5202 rack_tend_gp_marks(tp, rack);
5203 rack_log_pacing_delay_calc(rack,
5208 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts),
5210 __LINE__, rsm, quality);
5211 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL);
5214 * To make sure proper timestamp merging occurs, we need to clear
5215 * all GP marks if we don't start a measurement.
5217 rack_clear_gp_marks(tp, rack);
5222 * CC wrapper hook functions
5225 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs,
5226 uint16_t type, int32_t recovery)
5228 uint32_t prior_cwnd, acked;
5229 struct tcp_log_buffer *lgb = NULL;
5230 uint8_t labc_to_use, quality;
5232 INP_WLOCK_ASSERT(tptoinpcb(tp));
5233 tp->t_ccv.nsegs = nsegs;
5234 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una);
5235 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) {
5238 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp);
5239 if (tp->t_ccv.bytes_this_ack > max) {
5240 tp->t_ccv.bytes_this_ack = max;
5244 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
5245 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd);
5247 if ((th_ack == tp->snd_max) && rack->lt_bw_up) {
5248 /* We will ack all, time
5249 * to end any lt_bw_up we
5250 * have running until something
5255 rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq);
5256 rack->r_ctl.lt_seq = tp->snd_max;
5257 (void)tcp_get_usecs(&tv);
5258 rack->r_ctl.lt_bw_time += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark);
5261 quality = RACK_QUALITY_NONE;
5262 if ((tp->t_flags & TF_GPUTINPROG) &&
5263 rack_enough_for_measurement(tp, rack, th_ack, &quality)) {
5264 /* Measure the Goodput */
5265 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality);
5267 /* Which way are we limited; if not cwnd limited, no advance in CA */
5268 if (tp->snd_cwnd <= tp->snd_wnd)
5269 tp->t_ccv.flags |= CCF_CWND_LIMITED;
5271 tp->t_ccv.flags &= ~CCF_CWND_LIMITED;
5272 if (tp->snd_cwnd > tp->snd_ssthresh) {
5273 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack,
5274 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp));
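/*
 * Editorial note: this appears to be the standard Appropriate Byte
 * Counting (RFC 3465) limit. For example, with V_tcp_abc_l_var = 2
 * and a 1448-byte maxseg, an ack covering one segment may credit at
 * most 2 * 1448 = 2896 bytes toward t_bytes_acked.
 */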
5275 /* For the setting of a window past use the actual scwnd we are using */
5276 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) {
5277 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use;
5278 tp->t_ccv.flags |= CCF_ABC_SENTAWND;
5281 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND;
5282 tp->t_bytes_acked = 0;
5284 prior_cwnd = tp->snd_cwnd;
5285 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec ||
5286 (rack_client_low_buf && rack->client_bufferlvl &&
5287 (rack->client_bufferlvl < rack_client_low_buf)))
5288 labc_to_use = rack->rc_labc;
5290 labc_to_use = rack_max_abc_post_recovery;
5291 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
5292 union tcp_log_stackspecific log;
5295 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5296 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
5297 log.u_bbr.flex1 = th_ack;
5298 log.u_bbr.flex2 = tp->t_ccv.flags;
5299 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack;
5300 log.u_bbr.flex4 = tp->t_ccv.nsegs;
5301 log.u_bbr.flex5 = labc_to_use;
5302 log.u_bbr.flex6 = prior_cwnd;
5303 log.u_bbr.flex7 = V_tcp_do_newsack;
5304 log.u_bbr.flex8 = 1;
5305 lgb = tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
5306 0, &log, false, NULL, __func__, __LINE__,&tv);
5308 if (CC_ALGO(tp)->ack_received != NULL) {
5309 /* XXXLAS: Find a way to live without this */
5310 tp->t_ccv.curack = th_ack;
5311 tp->t_ccv.labc = labc_to_use;
5312 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC;
5313 CC_ALGO(tp)->ack_received(&tp->t_ccv, type);
5316 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd;
5318 if (rack->r_must_retran) {
5319 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) {
5321 * We are now beyond the rxt point so let's disable
5324 rack->r_ctl.rc_out_at_rto = 0;
5325 rack->r_must_retran = 0;
5326 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) {
5328 * Only decrement the rc_out_at_rto if the cwnd advances
5329 * at least a whole segment. Otherwise next time the peer
5330 * acks, we won't be able to send. This generally happens
5331 * when we are in Congestion Avoidance.
5333 if (acked <= rack->r_ctl.rc_out_at_rto){
5334 rack->r_ctl.rc_out_at_rto -= acked;
5336 rack->r_ctl.rc_out_at_rto = 0;
5341 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use);
5343 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) {
5344 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use;
5349 tcp_rack_partialack(struct tcpcb *tp)
5351 struct tcp_rack *rack;
5353 rack = (struct tcp_rack *)tp->t_fb_ptr;
5354 INP_WLOCK_ASSERT(tptoinpcb(tp));
5356 * If we are doing PRR and have enough
5357 * room to send <or> we are pacing and prr
5358 * is disabled we will want to see if we
5359 * can send data (by setting r_wanted_output to
5362 if ((rack->r_ctl.rc_prr_sndcnt > 0) ||
5364 rack->r_wanted_output = 1;
5368 rack_set_most_aggr(struct tcp_rack *rack)
5370 rack->r_fill_less_agg = 0;
5371 /* Once the cwnd has been clamped we don't do fill_cw */
5372 if (rack->r_cwnd_was_clamped == 0)
5373 rack->rc_pace_to_cwnd = 1;
5374 rack->r_pacing_discount = 0;
5378 rack_limit_fillcw(struct tcp_rack *rack)
5380 rack->r_fill_less_agg = 1;
5381 /* Once the cwnd as been clamped we don't do fill_cw */
5382 if (rack->r_cwnd_was_clamped == 0)
5383 rack->rc_pace_to_cwnd = 1;
5384 rack->r_pacing_discount = 0;
5388 rack_disable_fillcw(struct tcp_rack *rack)
5390 rack->r_fill_less_agg = 1;
5391 rack->rc_pace_to_cwnd = 0;
5392 rack->r_pacing_discount = 0;
5396 rack_client_buffer_level_set(struct tcp_rack *rack)
5399 * Only if DGP is on do we do anything that
5400 * changes stack behavior. If DGP is off all
5401 * we will do is issue a BB log (if BB logging is
5404 if (rack->dgp_on == 0) {
5405 rack_log_pacing_delay_calc(rack, 0, rack->client_bufferlvl,
5406 0, 0, 0, 30, __LINE__, NULL, 0);
5409 if (IN_RECOVERY(rack->rc_tp->t_flags) && rack->r_ctl.full_dgp_in_rec) {
5413 * We are in DGP so what setting should we
5414 * apply based on where the client is?
5416 switch(rack->r_ctl.rc_dgp_bl_agg) {
5420 rack_set_most_aggr(rack);
5423 if (rack->client_bufferlvl == 4)
5424 rack_limit_fillcw(rack);
5425 else if (rack->client_bufferlvl == 5)
5426 rack_disable_fillcw(rack);
5428 rack_set_most_aggr(rack);
5431 if (rack->client_bufferlvl == 3)
5432 rack_limit_fillcw(rack);
5433 else if (rack->client_bufferlvl == 4)
5434 rack_disable_fillcw(rack);
5435 else if (rack->client_bufferlvl == 5) {
5436 rack_disable_fillcw(rack);
5437 rack->r_pacing_discount = 1;
5438 rack->r_ctl.pacing_discount_amm = 1;
5440 rack_set_most_aggr(rack);
5443 if (rack->client_bufferlvl == 2)
5444 rack_limit_fillcw(rack);
5445 else if (rack->client_bufferlvl == 3)
5446 rack_disable_fillcw(rack);
5447 else if (rack->client_bufferlvl == 4) {
5448 rack_disable_fillcw(rack);
5449 rack->r_pacing_discount = 1;
5450 rack->r_ctl.pacing_discount_amm = 1;
5451 } else if (rack->client_bufferlvl == 5) {
5452 rack_disable_fillcw(rack);
5453 rack->r_pacing_discount = 1;
5454 rack->r_ctl.pacing_discount_amm = 2;
5456 rack_set_most_aggr(rack);
5459 rack_log_pacing_delay_calc(rack, rack->r_ctl.rc_dgp_bl_agg, rack->client_bufferlvl, 0,
5460 0, 0, 30, __LINE__, NULL, 0);
5464 do_rack_check_for_unclamp(struct tcpcb *tp, struct tcp_rack *rack)
5467 * Can we unclamp? We unclamp if more than
5468 * N rounds have transpired with no loss.
5470 uint64_t snds, rxts, rxt_per;
5473 rnds = rack->r_ctl.current_round - rack->r_ctl.last_rnd_rxt_clamped;
5474 if ((rack_unclamp_round_thresh > 0) &&
5475 (rnds >= rack_unclamp_round_thresh)) {
5476 snds = tp->t_sndbytes - rack->r_ctl.last_sndbytes;
5477 KASSERT ((snds > 0), ("rack:%p tp:%p snds:%ju is 0", rack, tp,
5479 rxts = tp->t_snd_rxt_bytes - rack->r_ctl.last_snd_rxt_bytes;
5480 rxt_per = rxts * 1000;
5482 if ((uint32_t)rxt_per <= rack_unclamp_rxt_thresh) {
5484 if (tcp_bblogging_on(rack->rc_tp)) {
5485 union tcp_log_stackspecific log;
5488 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5489 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
5490 log.u_bbr.flex3 = rnds;
5491 log.u_bbr.flex4 = rack_unclamp_round_thresh;
5492 log.u_bbr.flex5 = (uint32_t)rxt_per;
5493 log.u_bbr.flex8 = 6;
5494 log.u_bbr.pkt_epoch = rack->r_ctl.rc_pace_max_segs;
5495 log.u_bbr.bbr_state = rack->rc_pace_to_cwnd;
5496 log.u_bbr.delivered = rack->r_ctl.num_of_clamps_applied;
5497 log.u_bbr.applimited = rack->r_ctl.max_clamps;
5498 log.u_bbr.epoch = rack->r_ctl.clamp_options;
5499 log.u_bbr.cur_del_rate = rxts;
5500 log.u_bbr.bw_inuse = rack_get_lt_bw(rack);
5501 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
5502 log.u_bbr.lt_epoch = (uint32_t)((rack->r_ctl.gp_bw >> 32) & 0x00000000ffffffff);
5503 log.u_bbr.pkts_out = (uint32_t)(rack->r_ctl.gp_bw & 0x00000000ffffffff);
5504 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
5505 0, &log, false, NULL, NULL, 0, &tv);
5507 rack->r_ctl.num_of_clamps_applied = 0;
5508 rack->r_cwnd_was_clamped = 0;
5509 rack->excess_rxt_on = 1;
5510 if (rack->r_ctl.clamp_options) {
5512 * We only allow fillcw to be toggled
5513 * if you are setting a max seg too.
5515 if (rack->r_ctl.clamp_options & 0x1) {
5516 if ((rack->rc_pace_to_cwnd == 0) && (rack->dgp_on == 0)) {
5517 /* turn on fill cw for non-dgp*/
5518 rack->rc_pace_to_cwnd = 0;
5519 } else if ((rack->dgp_on == 1) && (rack->rc_pace_to_cwnd == 1)) {
5520 /* For DGP we want it off */
5521 rack->rc_pace_to_cwnd = 1;
5526 /* Reset all multipliers to 100.0 so just the measured bw */
5527 /* Crash any per boosts down to 100% */
5528 rack->r_ctl.rack_per_of_gp_rec = 100;
5529 rack->r_ctl.rack_per_of_gp_ss = 100;
5530 rack->r_ctl.rack_per_of_gp_ca = 100;
5531 /* Set in an upper bound for ss/ca % increase */
5532 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss;
5533 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca;
5540 do_rack_excess_rxt(struct tcpcb *tp, struct tcp_rack *rack)
5543 * Rack excess rxt accounting is turned on. If we
5544 * are above a threshold of rxt's in at least N
5545 * rounds, then back off the cwnd and ssthresh
5546 * to fit into the long-term b/w.
5548 uint64_t snds, rxts, rxt_per, lt_bw, bdp;
5549 uint32_t rnds, new_cwnd, new_ssthresh, rtt, shared_cwnd_was_enabled = 0;
5551 /* Is it shut off by 0 rounds? */
5552 if (rack_rxt_min_rnds == 0)
5554 if ((rack->r_ctl.max_clamps > 0) &&
5555 (rack->r_ctl.num_of_clamps_applied >= rack->r_ctl.max_clamps)) {
5557 * The idea, if max_clamps is set, is that if clamping it
5558 * N times did not work, then there is no sense in
5559 * clamping it again. The link is just a lossy link and
5560 * our clamps are doing no good. Turn it off so we don't come
5563 rack->excess_rxt_on = 0;
5564 rack->r_cwnd_was_clamped = 0;
5565 rack->r_ctl.num_of_clamps_applied = 0;
5568 snds = tp->t_sndbytes - rack->r_ctl.last_sndbytes;
5569 rxts = tp->t_snd_rxt_bytes - rack->r_ctl.last_snd_rxt_bytes;
5570 rnds = rack->r_ctl.current_round - rack->r_ctl.last_rnd_rxt_clamped;
5571 /* Have enough rounds progressed for us to re-measure? */
5572 if ((rnds >= rack_rxt_min_rnds) &&
5573 (rack->r_ctl.rxt_threshold > 0)){
5574 rxt_per = rxts * 1000;
5576 if (rxt_per >= rack->r_ctl.rxt_threshold) {
5579 * We are above our excess retransmit level, let's
5580 * cut down the cwnd and ssthresh to match the long-term
5581 * b/w we are getting.
5583 /* First disable scwnd if enabled */
5584 #ifdef NETFLIX_SHARED_CWND
5585 rack->rack_enable_scwnd = 0;
5586 if (rack->r_ctl.rc_scw) {
5589 shared_cwnd_was_enabled = 1;
5590 if (rack->r_limit_scw)
5591 limit = max(1, rack->r_ctl.rc_lowest_us_rtt);
5594 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw,
5595 rack->r_ctl.rc_scw_index,
5597 rack->r_ctl.rc_scw = NULL;
5601 /* Calculate what the cwnd and ssthresh should be */
5602 tcp_trace_point(rack->rc_tp, TCP_TP_EXCESS_RXT);
5603 lt_bw = rack_get_lt_bw(rack);
5606 * No lt_bw, let's chop things to one MSS
5607 * and the ssthresh to the iwnd.
5610 new_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
5611 new_ssthresh = tcp_compute_initwnd(tcp_maxseg(tp));
5613 rtt = rack->rc_rack_rtt;
5615 /* If we have no rack_rtt drop to the IW situation */
5618 bdp = lt_bw * (uint64_t)rtt;
5619 bdp /= HPTS_USEC_IN_SEC;
5620 new_cwnd = (uint32_t)bdp;
5621 new_ssthresh = new_cwnd - 1;
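/*
 * Worked example (editorial sketch, assuming rc_rack_rtt is in
 * microseconds as the HPTS_USEC_IN_SEC divide suggests): with
 * lt_bw = 1,250,000 bytes/sec and rtt = 40,000 usec,
 * bdp = 1,250,000 * 40,000 / 1,000,000 = 50,000 bytes, so the clamp
 * targets a 50,000-byte cwnd.
 */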
5622 if (new_cwnd < ctf_fixed_maxseg(tp)) {
5623 /* Rock bottom, go to IW settings */
5627 rack->r_cwnd_was_clamped = 1;
5628 rack->r_ctl.num_of_clamps_applied++;
5629 /* Reset the counter from now */
5630 tp->t_bytes_acked = 0;
5632 * Now what about options?
5633 * We look at the bottom 8 bits:
5634 * F = fill cw bit (toggle it if set)
5636 * M = set max segment bit
5640 if (rack->r_ctl.clamp_options) {
5641 if (rack->r_ctl.clamp_options & 0x1) {
5642 if ((rack->rc_pace_to_cwnd == 0) && (rack->dgp_on == 0)) {
5643 /* turn on fill cw for non-dgp*/
5644 rack->rc_pace_to_cwnd = 1;
5645 } else if ((rack->dgp_on == 1) && (rack->rc_pace_to_cwnd == 1)) {
5646 /* For DGP we want it off */
5647 rack->rc_pace_to_cwnd = 0;
5652 /* Reset all multipliers to 100.0 so just the measured bw */
5653 /* Crash any per boosts down to 100% */
5654 rack->r_ctl.rack_per_of_gp_rec = 100;
5655 rack->r_ctl.rack_per_of_gp_ss = 100;
5656 rack->r_ctl.rack_per_of_gp_ca = 100;
5657 /* Set in an upper bound for ss/ca % increase */
5658 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_clamp_ss_upper;
5659 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_clamp_ca_upper;
5660 /* Now move to the lt_bw */
5661 rack->r_ctl.gp_bw = lt_bw;
5662 rack->rc_gp_filled = 1;
5663 rack->r_ctl.num_measurements = RACK_REQ_AVG;
5665 if (tcp_bblogging_on(rack->rc_tp)) {
5666 union tcp_log_stackspecific log;
5669 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5670 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
5671 log.u_bbr.flex1 = new_cwnd;
5672 log.u_bbr.flex2 = new_ssthresh;
5673 log.u_bbr.flex3 = rnds;
5674 log.u_bbr.flex4 = rack_rxt_min_rnds;
5675 log.u_bbr.flex5 = rtt;
5676 log.u_bbr.flex6 = shared_cwnd_was_enabled;
5677 log.u_bbr.flex8 = 5;
5678 log.u_bbr.pkt_epoch = rack->r_ctl.rc_pace_max_segs;
5679 log.u_bbr.bbr_state = rack->rc_pace_to_cwnd;
5680 log.u_bbr.delivered = rack->r_ctl.num_of_clamps_applied;
5681 log.u_bbr.applimited = rack->r_ctl.max_clamps;
5682 log.u_bbr.epoch = rack->r_ctl.clamp_options;
5683 log.u_bbr.cur_del_rate = rxts;
5684 log.u_bbr.delRate = snds;
5685 log.u_bbr.rttProp = rack->r_ctl.rxt_threshold;
5686 log.u_bbr.bw_inuse = lt_bw;
5687 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
5688 log.u_bbr.lt_epoch = (uint32_t)((rack->r_ctl.gp_bw >> 32) & 0x00000000ffffffff);
5689 log.u_bbr.pkts_out = (uint32_t)(rack->r_ctl.gp_bw & 0x00000000ffffffff);
5690 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
5691 0, &log, false, NULL, NULL, 0, &tv);
5693 /* Update our point where we did it */
5694 if (rack->r_ctl.already_had_a_excess == 0) {
5695 rack->r_ctl.already_had_a_excess = 1;
5696 counter_u64_add(rack_rxt_clamps_cwnd_uniq, 1);
5698 counter_u64_add(rack_rxt_clamps_cwnd, 1);
5699 rack->r_ctl.last_sndbytes = tp->t_sndbytes;
5700 rack->r_ctl.last_snd_rxt_bytes = tp->t_snd_rxt_bytes;
5701 rack->r_ctl.last_rnd_rxt_clamped = rack->r_ctl.current_round;
5702 if (new_cwnd < tp->snd_cwnd)
5703 tp->snd_cwnd = new_cwnd;
5704 if (new_ssthresh < tp->snd_ssthresh)
5705 tp->snd_ssthresh = new_ssthresh;
5711 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack)
5713 struct tcp_rack *rack;
5716 orig_cwnd = tp->snd_cwnd;
5717 INP_WLOCK_ASSERT(tptoinpcb(tp));
5718 rack = (struct tcp_rack *)tp->t_fb_ptr;
5719 /* only alert CC if we alerted when we entered */
5720 if (CC_ALGO(tp)->post_recovery != NULL) {
5721 tp->t_ccv.curack = th_ack;
5722 CC_ALGO(tp)->post_recovery(&tp->t_ccv);
5723 if (tp->snd_cwnd < tp->snd_ssthresh) {
5725 * Rack has burst control and pacing
5726 * so let's not set this any lower than
5727 * snd_ssthresh per RFC-6582 (option 2).
5729 tp->snd_cwnd = tp->snd_ssthresh;
5732 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
5733 union tcp_log_stackspecific log;
5736 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5737 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
5738 log.u_bbr.flex1 = th_ack;
5739 log.u_bbr.flex2 = tp->t_ccv.flags;
5740 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack;
5741 log.u_bbr.flex4 = tp->t_ccv.nsegs;
5742 log.u_bbr.flex5 = V_tcp_abc_l_var;
5743 log.u_bbr.flex6 = orig_cwnd;
5744 log.u_bbr.flex7 = V_tcp_do_newsack;
5745 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
5746 log.u_bbr.flex8 = 2;
5747 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
5748 0, &log, false, NULL, __func__, __LINE__, &tv);
5750 if ((rack->rack_no_prr == 0) &&
5751 (rack->no_prr_addback == 0) &&
5752 (rack->r_ctl.rc_prr_sndcnt > 0)) {
5754 * Suck the next prr cnt back into cwnd, but
5755 * only do that if we are not application limited.
5757 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) {
5759 * We are allowed to add back to the cwnd the amount we did
5761 * a) no_prr_addback is off.
5762 * b) we are not app limited
5763 * c) we are doing prr
5765 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none).
5767 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax),
5768 rack->r_ctl.rc_prr_sndcnt);
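/*
 * Editorial note: the add-back is bounded; e.g. with a 1448-byte
 * maxseg and an (assumed) rack_prr_addbackmax of 2, at most
 * 2 * 1448 = 2896 bytes of leftover PRR credit are folded back into
 * the cwnd here.
 */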
5770 rack->r_ctl.rc_prr_sndcnt = 0;
5771 rack_log_to_prr(rack, 1, 0, __LINE__);
5773 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__);
5774 tp->snd_recover = tp->snd_una;
5775 if (rack->r_ctl.dsack_persist) {
5776 rack->r_ctl.dsack_persist--;
5777 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
5778 rack->r_ctl.num_dsack = 0;
5780 rack_log_dsack_event(rack, 1, __LINE__, 0, 0);
5782 EXIT_RECOVERY(tp->t_flags);
5783 if (rack->r_ctl.full_dgp_in_rec)
5784 rack_client_buffer_level_set(rack);
5788 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line)
5790 struct tcp_rack *rack;
5791 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd;
5793 INP_WLOCK_ASSERT(tptoinpcb(tp));
5795 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
5797 if (IN_RECOVERY(tp->t_flags) == 0) {
5798 in_rec_at_entry = 0;
5799 ssthresh_enter = tp->snd_ssthresh;
5800 cwnd_enter = tp->snd_cwnd;
5802 in_rec_at_entry = 1;
5803 rack = (struct tcp_rack *)tp->t_fb_ptr;
5806 tp->t_flags &= ~TF_WASFRECOVERY;
5807 tp->t_flags &= ~TF_WASCRECOVERY;
5808 if (!IN_FASTRECOVERY(tp->t_flags)) {
5809 if (rack->dgp_on && rack->r_cwnd_was_clamped) {
5810 /* Reset the gains so that on exit we will be softer longer */
5811 rack->r_ctl.rack_per_of_gp_rec = 100;
5812 rack->r_ctl.rack_per_of_gp_ss = 98;
5813 rack->r_ctl.rack_per_of_gp_ca = 98;
5815 rack->r_ctl.rc_prr_delivered = 0;
5816 rack->r_ctl.rc_prr_out = 0;
5817 rack->r_fast_output = 0;
5818 if (rack->rack_no_prr == 0) {
5819 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
5820 rack_log_to_prr(rack, 2, in_rec_at_entry, line);
5822 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una;
5823 tp->snd_recover = tp->snd_max;
5824 if (tp->t_flags2 & TF2_ECN_PERMIT)
5825 tp->t_flags2 |= TF2_ECN_SND_CWR;
5829 if (!IN_CONGRECOVERY(tp->t_flags) ||
5831 * Allow ECN reaction on ACK to CWR, if
5832 * that data segment was also CE marked.
5834 SEQ_GEQ(ack, tp->snd_recover)) {
5835 EXIT_CONGRECOVERY(tp->t_flags);
5836 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd);
5837 rack->r_fast_output = 0;
5838 tp->snd_recover = tp->snd_max + 1;
5839 if (tp->t_flags2 & TF2_ECN_PERMIT)
5840 tp->t_flags2 |= TF2_ECN_SND_CWR;
5845 tp->t_bytes_acked = 0;
5846 rack->r_fast_output = 0;
5847 EXIT_RECOVERY(tp->t_flags);
5848 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 /
5849 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp);
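/*
 * Editorial note on the math here (this appears to be the RTO
 * congestion-signal case): with cwnd_to_use = 20,000 bytes, a larger
 * snd_wnd and a 1448-byte maxseg, ssthresh becomes
 * (10,000 / 1448) * 1448 = 8,688 bytes (rounded down to whole
 * segments) while the cwnd is reset to a single segment just below.
 */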
5850 orig_cwnd = tp->snd_cwnd;
5851 tp->snd_cwnd = ctf_fixed_maxseg(tp);
5852 rack_log_to_prr(rack, 16, orig_cwnd, line);
5853 if (tp->t_flags2 & TF2_ECN_PERMIT)
5854 tp->t_flags2 |= TF2_ECN_SND_CWR;
5857 KMOD_TCPSTAT_INC(tcps_sndrexmitbad);
5858 /* RTO was unnecessary, so reset everything. */
5859 tp->snd_cwnd = tp->snd_cwnd_prev;
5860 tp->snd_ssthresh = tp->snd_ssthresh_prev;
5861 tp->snd_recover = tp->snd_recover_prev;
5862 if (tp->t_flags & TF_WASFRECOVERY) {
5863 ENTER_FASTRECOVERY(tp->t_flags);
5864 tp->t_flags &= ~TF_WASFRECOVERY;
5866 if (tp->t_flags & TF_WASCRECOVERY) {
5867 ENTER_CONGRECOVERY(tp->t_flags);
5868 tp->t_flags &= ~TF_WASCRECOVERY;
5870 tp->snd_nxt = tp->snd_max;
5871 tp->t_badrxtwin = 0;
5874 if ((CC_ALGO(tp)->cong_signal != NULL) &&
5876 tp->t_ccv.curack = ack;
5877 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type);
5879 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) {
5880 rack_log_to_prr(rack, 15, cwnd_enter, line);
5881 if (rack->r_ctl.full_dgp_in_rec)
5882 rack_client_buffer_level_set(rack);
5883 rack->r_ctl.dsack_byte_cnt = 0;
5884 rack->r_ctl.retran_during_recovery = 0;
5885 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter;
5886 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter;
5887 rack->r_ent_rec_ns = 1;
5892 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp)
5896 INP_WLOCK_ASSERT(tptoinpcb(tp));
5898 if (CC_ALGO(tp)->after_idle != NULL)
5899 CC_ALGO(tp)->after_idle(&tp->t_ccv);
5901 if (tp->snd_cwnd == 1)
5902 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */
5904 i_cwnd = rc_init_window(rack);
5907 * Being idle is no different than the initial window. If the cc
5908 * clamps it down below the initial window raise it to the initial
5911 if (tp->snd_cwnd < i_cwnd) {
5912 tp->snd_cwnd = i_cwnd;
5917 * Indicate whether this ack should be delayed. We can delay the ack if
5918 * following conditions are met:
5919 * - There is no delayed ack timer in progress.
5920 * - Our last ack wasn't a 0-sized window. We never want to delay
5921 * the ack that opens up a 0-sized window.
5922 * - LRO wasn't used for this segment. We make sure by checking that the
5923 * segment size is not larger than the MSS.
5924 * - Delayed acks are enabled or this is a half-synchronized T/TCP
5927 #define DELAY_ACK(tp, tlen) \
5928 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \
5929 ((tp->t_flags & TF_DELACK) == 0) && \
5930 (tlen <= tp->t_maxseg) && \
5931 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
5933 static struct rack_sendmap *
5934 rack_find_lowest_rsm(struct tcp_rack *rack)
5936 struct rack_sendmap *rsm;
5939 * Walk the time-order transmitted list looking for an rsm that is
5940 * not acked. This will be the one that was sent the longest time
5941 * ago that is still outstanding.
5943 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
5944 if (rsm->r_flags & RACK_ACKED) {
5953 static struct rack_sendmap *
5954 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
5956 struct rack_sendmap *prsm;
5959 * Walk the sequence order list backward until we hit and arrive at
5960 * the highest seq not acked. In theory when this is called it
5961 * should be the last segment (which it was not).
5965 TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) {
5966 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
5975 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
5981 * lro is the flag we use to determine if we have seen reordering.
5982 * If it gets set we have seen reordering. The reorder logic either
5983 * works in one of two ways:
5985 * If reorder-fade is configured, then we track the last time we saw
5986 * re-ordering occur. If we reach the point where enough time has
5987 * passed we no longer consider reordering as occurring.
5989 * Or if reorder-fade is 0, then once we see reordering we consider
5990 * the connection to always be subject to reordering and just set lro
5993 * In the end if lro is non-zero we add the extra time for
5998 if (rack->r_ctl.rc_reorder_ts) {
5999 if (rack->r_ctl.rc_reorder_fade) {
6000 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
6001 lro = cts - rack->r_ctl.rc_reorder_ts;
6004 * No time has passed since the last
6005 * reorder, mark it as reordering.
6010 /* Negative time? */
6013 if (lro > rack->r_ctl.rc_reorder_fade) {
6014 /* Turn off reordering seen too */
6015 rack->r_ctl.rc_reorder_ts = 0;
6019 /* Reordering does not fade */
6025 if (rack->rc_rack_tmr_std_based == 0) {
6026 thresh = srtt + rack->r_ctl.rc_pkt_delay;
6028 /* Standards based pkt-delay is 1/4 srtt */
6029 thresh = srtt + (srtt >> 2);
6031 if (lro && (rack->rc_rack_tmr_std_based == 0)) {
6032 /* It must be set, if not you get 1/4 rtt */
6033 if (rack->r_ctl.rc_reorder_shift)
6034 thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
6036 thresh += (srtt >> 2);
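/*
 * Worked example (editorial sketch): with srtt = 40,000 usec, no
 * configured rc_pkt_delay and a (hypothetical) reorder shift of 3,
 * the threshold becomes 40,000 + 40,000/8 = 45,000 usec once
 * reordering has been seen; the checks below then cap it at
 * 2 * srtt and rack_rto_max.
 */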
6038 if (rack->rc_rack_use_dsack &&
6040 (rack->r_ctl.num_dsack > 0)) {
6042 * We only increase the reordering window if we
6043 * have seen reordering <and> we have a DSACK count.
6045 thresh += rack->r_ctl.num_dsack * (srtt >> 2);
6046 rack_log_dsack_event(rack, 4, __LINE__, srtt, thresh);
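/*
 * Editorial note: each observed DSACK widens the window by another
 * srtt/4 here, e.g. two DSACKs with srtt = 40,000 usec add
 * 20,000 usec, still subject to the 2 * srtt ceiling just below.
 */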
6048 /* SRTT * 2 is the ceiling */
6049 if (thresh > (srtt * 2)) {
6052 /* And we don't want it above the RTO max either */
6053 if (thresh > rack_rto_max) {
6054 thresh = rack_rto_max;
6056 rack_log_dsack_event(rack, 6, __LINE__, srtt, thresh);
6061 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
6062 struct rack_sendmap *rsm, uint32_t srtt)
6064 struct rack_sendmap *prsm;
6065 uint32_t thresh, len;
6070 if (rack->r_ctl.rc_tlp_threshold)
6071 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold);
6073 thresh = (srtt * 2);
6075 /* Get the previously sent packet, if any */
6076 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
6077 len = rsm->r_end - rsm->r_start;
6078 if (rack->rack_tlp_threshold_use == TLP_USE_ID) {
6079 /* Exactly like the ID */
6080 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) {
6081 uint32_t alt_thresh;
6083 * Compensate for delayed-ack with the d-ack time.
6085 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
6086 if (alt_thresh > thresh)
6087 thresh = alt_thresh;
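/*
 * Worked example (editorial sketch): with srtt = 20,000 usec and a
 * (hypothetical) rack_delayed_ack_time of 40,000 usec, the
 * delayed-ack compensated threshold is
 * 20,000 + 10,000 + 40,000 = 70,000 usec, replacing the
 * 2 * srtt = 40,000 usec default when larger.
 */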
6089 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) {
6091 prsm = TAILQ_PREV(rsm, rack_head, r_tnext);
6092 if (prsm && (len <= segsiz)) {
6094 * Two packets outstanding, thresh should be (2*srtt) +
6095 * possible inter-packet delay (if any).
6097 uint32_t inter_gap = 0;
6100 idx = rsm->r_rtr_cnt - 1;
6101 nidx = prsm->r_rtr_cnt - 1;
6102 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) {
6103 /* Yes it was sent later (or at the same time) */
6104 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx];
6106 thresh += inter_gap;
6107 } else if (len <= segsiz) {
6109 * Possibly compensate for delayed-ack.
6111 uint32_t alt_thresh;
6113 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
6114 if (alt_thresh > thresh)
6115 thresh = alt_thresh;
6117 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) {
6119 if (len <= segsiz) {
6120 uint32_t alt_thresh;
6122 * Compensate for delayed-ack with the d-ack time.
6124 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
6125 if (alt_thresh > thresh)
6126 thresh = alt_thresh;
6129 /* Not above an RTO */
6130 if (thresh > tp->t_rxtcur) {
6131 thresh = tp->t_rxtcur;
6133 /* Not above a RTO max */
6134 if (thresh > rack_rto_max) {
6135 thresh = rack_rto_max;
6137 /* Apply user supplied min TLP */
6138 if (thresh < rack_tlp_min) {
6139 thresh = rack_tlp_min;
6145 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack)
6148 * We want the rack_rtt which is the
6149 * last rtt we measured. However if that
6150 * does not exist we fall back to the srtt (which
6151 * we probably will never do) and then as a last
6152 * resort we use RACK_INITIAL_RTO if no srtt is
6155 if (rack->rc_rack_rtt)
6156 return (rack->rc_rack_rtt);
6157 else if (tp->t_srtt == 0)
6158 return (RACK_INITIAL_RTO);
6159 return (tp->t_srtt);
6162 static struct rack_sendmap *
6163 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused)
6166 * Check to see that we don't need to fall into recovery. We will
6167 * need to do so if our oldest transmit is past the time we should
6170 struct tcp_rack *rack;
6171 struct rack_sendmap *rsm;
6173 uint32_t srtt, thresh;
6175 rack = (struct tcp_rack *)tp->t_fb_ptr;
6176 if (tqhash_empty(rack->r_ctl.tqh)) {
6179 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6184 if (rsm->r_flags & RACK_ACKED) {
6185 rsm = rack_find_lowest_rsm(rack);
6189 idx = rsm->r_rtr_cnt - 1;
6190 srtt = rack_grab_rtt(tp, rack);
6191 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
6192 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) {
6195 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) {
6198 /* Ok if we reach here we are overdue and this guy can be sent */
6199 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
6204 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack)
6210 t = (tp->t_srtt + (tp->t_rttvar << 2));
6211 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
6212 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop);
6213 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT;
6214 ret_val = (uint32_t)tt;
6219 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack)
6222 * Start the FR timer, we do this based on getting the first one in
6223 * the rc_tmap. Note that if it's NULL we must stop the timer. In all
6224 * events we need to stop the running timer (if it's running) before
6225 * starting the new one.
6227 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse;
6230 int32_t is_tlp_timer = 0;
6231 struct rack_sendmap *rsm;
6233 if (rack->t_timers_stopped) {
6234 /* All timers have been stopped, none are to run */
6237 if (rack->rc_in_persist) {
6238 /* We can't start any timer in persists */
6239 return (rack_get_persists_timer_val(tp, rack));
6241 rack->rc_on_min_to = 0;
6242 if ((tp->t_state < TCPS_ESTABLISHED) ||
6243 (rack->sack_attack_disable > 0) ||
6244 ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
6247 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6248 if ((rsm == NULL) || sup_rack) {
6249 /* Nothing on the send map or no rack */
6251 time_since_sent = 0;
6252 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6255 * Should we discount the RTX timer any?
6257 * We want to discount it the smallest amount.
6258 * If a timer (Rack/TLP or RXT) has gone off more
6259 * recently, that's the discount we want to use (now - timer time).
6260 * If the retransmit of the oldest packet was more recent, then
6261 * we want to use that (now - oldest-packet-last_transmit_time).
6264 idx = rsm->r_rtr_cnt - 1;
6265 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx])))
6266 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
6268 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
6269 if (TSTMP_GT(cts, tstmp_touse))
6270 time_since_sent = cts - tstmp_touse;
6272 if (SEQ_LT(tp->snd_una, tp->snd_max) ||
6273 sbavail(&tptosocket(tp)->so_snd)) {
6274 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT;
6276 if (to > time_since_sent)
6277 to -= time_since_sent;
6279 to = rack->r_ctl.rc_min_to;
6282 /* Special case for KEEPINIT */
6283 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
6284 (TP_KEEPINIT(tp) != 0) &&
6287 * We have to put a ceiling on the rxt timer
6288 * of the keep-init timeout.
6290 uint32_t max_time, red;
6292 max_time = TICKS_2_USEC(TP_KEEPINIT(tp));
6293 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) {
6294 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]);
6300 /* Reduce timeout to the keep value if needed */
6308 if (rsm->r_flags & RACK_ACKED) {
6309 rsm = rack_find_lowest_rsm(rack);
6315 if (rack->sack_attack_disable) {
6317 * We don't want to do
6318 * any TLP's if you are an attacker.
6319 * Though if you are doing what
6320 * is expected you may still have
6321 * SACK-PASSED marks.
6325 /* Convert from ms to usecs */
6326 if ((rsm->r_flags & RACK_SACK_PASSED) ||
6327 (rsm->r_flags & RACK_RWND_COLLAPSED) ||
6328 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
6329 if ((tp->t_flags & TF_SENTFIN) &&
6330 ((tp->snd_max - tp->snd_una) == 1) &&
6331 (rsm->r_flags & RACK_HAS_FIN)) {
6333 * We don't start a rack timer if all we have is a
6338 if ((rack->use_rack_rr == 0) &&
6339 (IN_FASTRECOVERY(tp->t_flags)) &&
6340 (rack->rack_no_prr == 0) &&
6341 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) {
6343 * We are not cheating, in recovery and
6344 * not enough ack's to yet get our next
6345 * retransmission out.
6347 * Note that classified attackers do not
6348 * get to use the rack-cheat.
6352 srtt = rack_grab_rtt(tp, rack);
6353 thresh = rack_calc_thresh_rack(rack, srtt, cts);
6354 idx = rsm->r_rtr_cnt - 1;
6355 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh;
6356 if (SEQ_GEQ(exp, cts)) {
6358 if (to < rack->r_ctl.rc_min_to) {
6359 to = rack->r_ctl.rc_min_to;
6360 if (rack->r_rr_config == 3)
6361 rack->rc_on_min_to = 1;
6364 to = rack->r_ctl.rc_min_to;
6365 if (rack->r_rr_config == 3)
6366 rack->rc_on_min_to = 1;
6369 /* Ok we need to do a TLP not RACK */
6371 if ((rack->rc_tlp_in_progress != 0) &&
6372 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) {
6374 * The previous send was a TLP and we have sent
6375 * N TLP's without sending new data.
6379 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
6381 /* We found no rsm to TLP with. */
6384 if (rsm->r_flags & RACK_HAS_FIN) {
6385 /* If it's a FIN we don't do TLP */
6389 idx = rsm->r_rtr_cnt - 1;
6390 time_since_sent = 0;
6391 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time))
6392 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
6394 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
6395 if (TSTMP_GT(cts, tstmp_touse))
6396 time_since_sent = cts - tstmp_touse;
6399 if ((rack->rc_srtt_measure_made == 0) &&
6400 (tp->t_srtt == 1)) {
6402 * If another stack has run and set srtt to 1,
6403 * then the srtt was 0, so let's use the initial.
6405 srtt = RACK_INITIAL_RTO;
6407 srtt_cur = tp->t_srtt;
6411 srtt = RACK_INITIAL_RTO;
6413 * If the SRTT is not keeping up and the
6414 * rack RTT has spiked we want to use
6415 * the last RTT not the smoothed one.
6417 if (rack_tlp_use_greater &&
6419 (srtt < rack_grab_rtt(tp, rack))) {
6420 srtt = rack_grab_rtt(tp, rack);
6422 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt);
6423 if (thresh > time_since_sent) {
6424 to = thresh - time_since_sent;
6426 to = rack->r_ctl.rc_min_to;
6427 rack_log_alt_to_to_cancel(rack,
6429 time_since_sent, /* flex2 */
6430 tstmp_touse, /* flex3 */
6431 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */
6432 (uint32_t)rsm->r_tim_lastsent[idx],
6436 if (to < rack_tlp_min) {
6439 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) {
6441 * If the TLP time works out to larger than the max
6442 * RTO, let's not do TLP.. just RTO.
6447 if (is_tlp_timer == 0) {
6448 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK;
6450 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP;
6458 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, tcp_seq snd_una)
6462 if (rack->rc_in_persist == 0) {
6463 if (tp->t_flags & TF_GPUTINPROG) {
6465 * Stop the goodput now, the calling of the
6466 * measurement function clears the flag.
6468 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__,
6469 RACK_QUALITY_PERSIST);
6471 #ifdef NETFLIX_SHARED_CWND
6472 if (rack->r_ctl.rc_scw) {
6473 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
6474 rack->rack_scwnd_is_idle = 1;
6477 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(&tv);
6478 if (rack->lt_bw_up) {
6479 /* Suspend our LT BW measurement */
6482 rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq);
6483 rack->r_ctl.lt_seq = snd_una;
6484 tmark = tcp_tv_to_lusectick(&tv);
6485 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark);
6486 rack->r_ctl.lt_timemark = tmark;
6488 rack->r_persist_lt_bw_off = 1;
6490 if (rack->r_ctl.rc_went_idle_time == 0)
6491 rack->r_ctl.rc_went_idle_time = 1;
6492 rack_timer_cancel(tp, rack, cts, __LINE__);
6493 rack->r_ctl.persist_lost_ends = 0;
6494 rack->probe_not_answered = 0;
6495 rack->forced_ack = 0;
6497 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
6498 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
6499 rack->rc_in_persist = 1;
6504 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6509 if (tcp_in_hpts(rack->rc_inp)) {
6510 tcp_hpts_remove(rack->rc_inp);
6511 rack->r_ctl.rc_hpts_flags = 0;
6513 #ifdef NETFLIX_SHARED_CWND
6514 if (rack->r_ctl.rc_scw) {
6515 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
6516 rack->rack_scwnd_is_idle = 0;
6519 t_time = tcp_get_usecs(&tv);
6520 if (rack->rc_gp_dyn_mul &&
6521 (rack->use_fixed_rate == 0) &&
6522 (rack->rc_always_pace)) {
6524 * Do we count this as if a probe-rtt just
6527 uint32_t time_idle, idle_min;
6529 time_idle = t_time - rack->r_ctl.rc_went_idle_time;
6530 idle_min = rack_min_probertt_hold;
6531 if (rack_probertt_gpsrtt_cnt_div) {
6533 extra = (uint64_t)rack->r_ctl.rc_gp_srtt *
6534 (uint64_t)rack_probertt_gpsrtt_cnt_mul;
6535 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div;
6536 idle_min += (uint32_t)extra;
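/*
 * Editorial sketch: e.g. with rc_gp_srtt = 30,000 usec and (assumed)
 * defaults of rack_probertt_gpsrtt_cnt_mul = 3 and
 * rack_probertt_gpsrtt_cnt_div = 4, the idle time must reach
 * rack_min_probertt_hold + 22,500 usec before the idle period is
 * treated below as a completed probe-rtt.
 */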
6538 if (time_idle >= idle_min) {
6539 /* Yes, we count it as a probe-rtt. */
6542 us_cts = tcp_get_usecs(NULL);
6543 if (rack->in_probe_rtt == 0) {
6544 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
6545 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
6546 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
6547 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
6549 rack_exit_probertt(rack, us_cts);
6553 if (rack->r_persist_lt_bw_off) {
6554 /* Continue where we left off */
6555 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv);
6557 rack->r_persist_lt_bw_off = 0;
6559 rack->rc_in_persist = 0;
6560 rack->r_ctl.rc_went_idle_time = 0;
6562 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
6563 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
6564 rack->r_ctl.rc_agg_delayed = 0;
6567 rack->r_ctl.rc_agg_early = 0;
6571 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts,
6572 struct hpts_diag *diag, struct timeval *tv)
6574 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
6575 union tcp_log_stackspecific log;
6577 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
6578 log.u_bbr.flex1 = diag->p_nxt_slot;
6579 log.u_bbr.flex2 = diag->p_cur_slot;
6580 log.u_bbr.flex3 = diag->slot_req;
6581 log.u_bbr.flex4 = diag->inp_hptsslot;
6582 log.u_bbr.flex5 = diag->slot_remaining;
6583 log.u_bbr.flex6 = diag->need_new_to;
6584 log.u_bbr.flex7 = diag->p_hpts_active;
6585 log.u_bbr.flex8 = diag->p_on_min_sleep;
6586 /* Hijack other fields as needed */
6587 log.u_bbr.epoch = diag->have_slept;
6588 log.u_bbr.lt_epoch = diag->yet_to_sleep;
6589 log.u_bbr.pkts_out = diag->co_ret;
6590 log.u_bbr.applimited = diag->hpts_sleep_time;
6591 log.u_bbr.delivered = diag->p_prev_slot;
6592 log.u_bbr.inflight = diag->p_runningslot;
6593 log.u_bbr.bw_inuse = diag->wheel_slot;
6594 log.u_bbr.rttProp = diag->wheel_cts;
6595 log.u_bbr.timeStamp = cts;
6596 log.u_bbr.delRate = diag->maxslots;
6597 log.u_bbr.cur_del_rate = diag->p_curtick;
6598 log.u_bbr.cur_del_rate <<= 32;
6599 log.u_bbr.cur_del_rate |= diag->p_lasttick;
6600 TCP_LOG_EVENTP(rack->rc_tp, NULL,
6601 &rack->rc_inp->inp_socket->so_rcv,
6602 &rack->rc_inp->inp_socket->so_snd,
6603 BBR_LOG_HPTSDIAG, 0,
6604 0, &log, false, tv);
6610 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type)
6612 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
6613 union tcp_log_stackspecific log;
6616 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
6617 log.u_bbr.flex1 = sb->sb_flags;
6618 log.u_bbr.flex2 = len;
6619 log.u_bbr.flex3 = sb->sb_state;
6620 log.u_bbr.flex8 = type;
6621 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
6622 TCP_LOG_EVENTP(rack->rc_tp, NULL,
6623 &rack->rc_inp->inp_socket->so_rcv,
6624 &rack->rc_inp->inp_socket->so_snd,
6626 len, &log, false, &tv);
6631 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
6632 int32_t slot, uint32_t tot_len_this_send, int sup_rack)
6634 struct hpts_diag diag;
6635 struct inpcb *inp = tptoinpcb(tp);
6637 uint32_t delayed_ack = 0;
6638 uint32_t hpts_timeout;
6639 uint32_t entry_slot = slot;
6644 if ((tp->t_state == TCPS_CLOSED) ||
6645 (tp->t_state == TCPS_LISTEN)) {
6648 if (tcp_in_hpts(inp)) {
6649 /* Already on the pacer */
6652 stopped = rack->rc_tmr_stopped;
6653 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
6654 left = rack->r_ctl.rc_timer_exp - cts;
6656 rack->r_ctl.rc_timer_exp = 0;
6657 rack->r_ctl.rc_hpts_flags = 0;
6658 us_cts = tcp_get_usecs(&tv);
6659 /* Now early/late accounting */
6660 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0);
6661 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) {
6663 * We have an early carry over set,
6664 * we can always add more time so we
6665 * can always make this compensation.
6667 * Note if ack's are allowed to wake us do not
6668 * penalize the next timer for being awoken
6669 * by an ack aka the rc_agg_early (non-paced mode).
6671 slot += rack->r_ctl.rc_agg_early;
6673 rack->r_ctl.rc_agg_early = 0;
6677 * This is harder, we can
6678 * compensate some but it
6679 * really depends on what
6680 * the current pacing time is.
6682 if (rack->r_ctl.rc_agg_delayed >= slot) {
6684 * We can't compensate for it all.
6685 * And we have to have some time
6686 * on the clock. We always have a min
6687 * 10 slots (10 x 10 i.e. 100 usecs).
6689 if (slot <= HPTS_TICKS_PER_SLOT) {
6691 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot);
6692 slot = HPTS_TICKS_PER_SLOT;
6694 /* We take off some */
6695 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT);
6696 slot = HPTS_TICKS_PER_SLOT;
6699 slot -= rack->r_ctl.rc_agg_delayed;
6700 rack->r_ctl.rc_agg_delayed = 0;
6701 /* Make sure we have 100 useconds at minimum */
6702 if (slot < HPTS_TICKS_PER_SLOT) {
6703 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot;
6704 slot = HPTS_TICKS_PER_SLOT;
6706 if (rack->r_ctl.rc_agg_delayed == 0)
6710 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack);
6711 #ifdef TCP_SAD_DETECTION
6712 if (rack->sack_attack_disable &&
6713 (rack->r_ctl.ack_during_sd > 0) &&
6714 (slot < tcp_sad_pacing_interval)) {
6716 * We have a potential attacker on
6717 * the line. We have possibly some
6718 * (or no) pacing time set. We want to
6719 * slow down the processing of sacks by some
6720 * amount (if it is an attacker). Set the default
6721 * slot for attackers in place (unless the original
6722 * interval is longer). It's stored in
6723 * micro-seconds, so let's convert to msecs.
6725 slot = tcp_sad_pacing_interval;
6726 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__);
6727 rack->r_ctl.ack_during_sd = 0;
6730 if (tp->t_flags & TF_DELACK) {
6731 delayed_ack = TICKS_2_USEC(tcp_delacktime);
6732 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK;
6734 if (delayed_ack && ((hpts_timeout == 0) ||
6735 (delayed_ack < hpts_timeout)))
6736 hpts_timeout = delayed_ack;
6738 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
6740 * If no timers are going to run and we will fall off the hptsi
6741 * wheel, we resort to a keep-alive timer if it's configured.
6743 if ((hpts_timeout == 0) &&
6745 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
6746 (tp->t_state <= TCPS_CLOSING)) {
6748 * Ok we have no timer (persists, rack, tlp, rxt or
6749 * del-ack), we don't have segments being paced. So
6750 * all that is left is the keepalive timer.
6752 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
6753 /* Get the established keep-alive time */
6754 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp));
6757 * Get the initial setup keep-alive time,
6758 * note that this is probably not going to
6759 * happen, since rack will be running a rxt timer
6760 * if a SYN of some sort is outstanding. It is
6761 * actually handled in rack_timeout_rxt().
6763 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp));
6765 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP;
6766 if (rack->in_probe_rtt) {
6768 * We want to instead not wake up a long time from
6769 * now but to wake up about the time we would
6770 * exit probe-rtt and initiate a keep-alive ack.
6771 * This will get us out of probe-rtt and update
6774 hpts_timeout = rack_min_probertt_hold;
6778 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) ==
6779 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) {
6781 * RACK, TLP, persists and RXT timers all are restartable
6782 * based on input actions, i.e. we received a packet (ack
6783 * or sack) and that changes things (rw, or snd_una etc).
6784 * Thus we can restart them with a new value. For
6785 * keep-alive, delayed_ack we keep track of what was left
6786 * and restart the timer with a smaller value.
6788 if (left < hpts_timeout)
6789 hpts_timeout = left;
6793 * Hack alert, for now we can't time-out over 2,147,483
6794 * seconds (a bit more than 596 hours), which is probably ok
6797 if (hpts_timeout > 0x7ffffffe)
6798 hpts_timeout = 0x7ffffffe;
6799 rack->r_ctl.rc_timer_exp = cts + hpts_timeout;
6801 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0);
6802 if ((rack->gp_ready == 0) &&
6803 (rack->use_fixed_rate == 0) &&
6804 (hpts_timeout < slot) &&
6805 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) {
6807 * We have no good estimate yet for the
6808 * old clunky burst mitigation or the
6809 * real pacing. And the tlp or rxt is smaller
6810 * than the pacing calculation. Let's not
6811 * pace that long since we know the calculation
6812 * so far is not accurate.
6814 slot = hpts_timeout;
6817 * Turn off all the flags for queuing by default. The
6818 * flags have important meanings to what happens when
6819 * LRO interacts with the transport. Most likely (by default now)
6820 * mbuf_queueing and ack compression are on. So the transport
6821 * has a couple of flags that control what happens (if those
6822 * are not on then these flags won't have any effect since it
6823 * won't go through the queuing LRO path).
6825 * INP_MBUF_QUEUE_READY - This flags says that I am busy
6826 * pacing output, so don't disturb. But
6827 * it also means LRO can wake me if there
6828 * is a SACK arrival.
6830 * INP_DONT_SACK_QUEUE - This flag is used in conjunction
6831 * with the above flag (QUEUE_READY) and
6832 * when present it says don't even wake me
6833 * if a SACK arrives.
6835 * The idea behind these flags is that if we are pacing we
6836 * set the MBUF_QUEUE_READY and only get woken up if
6837 * a SACK arrives (which could change things) or if
6838 * our pacing timer expires. If, however, we have a rack
6839 * timer running, then we don't even want a sack to wake
6840 * us since the rack timer has to expire before we can send.
6842 * Other cases should usually have none of the flags set
6843 * so LRO can call into us.
6845 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
6847 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
6848 rack->r_ctl.rc_last_output_to = us_cts + slot;
6850 * A pacing timer (slot) is being set, in
6851 * such a case we cannot send (we are blocked by
6852 * the timer). So lets tell LRO that it should not
6853 * wake us unless there is a SACK. Note this only
6854 * will be effective if mbuf queueing is on or
6855 * compressed acks are being processed.
6857 inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
6859 * But wait if we have a Rack timer running
6860 * even a SACK should not disturb us (with
6861 * the exception of r_rr_config 3).
6863 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) {
6864 if (rack->r_rr_config != 3)
6865 inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
6866 else if (rack->rc_pace_dnd) {
6867 if (IN_RECOVERY(tp->t_flags)) {
6869 * When DND is on, we only let a sack
6870 * interrupt us if we are not in recovery.
6872 * If DND is off, then we never hit here
6873 * and let all sacks wake us up.
6876 inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
6880 /* For sack attackers we want to ignore sack */
6881 if (rack->sack_attack_disable == 1) {
6882 inp->inp_flags2 |= (INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
6883 } else if (rack->rc_ack_can_sendout_data) {
6885 * Ahh but wait, this is that special case
6886 * where the pacing timer can be disturbed;
6887 * back out the changes (used for non-paced
6890 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
6892 if ((rack->use_rack_rr) &&
6893 (rack->r_rr_config < 2) &&
6894 ((hpts_timeout) && (hpts_timeout < slot))) {
6896 * Arrange for the hpts to kick back in after the
6897 * t-o if the t-o does not cause a send.
6899 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout),
6901 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
6902 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
6904 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(slot),
6906 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
6907 rack_log_to_start(rack, cts, hpts_timeout, slot, 1);
6909 } else if (hpts_timeout) {
6911 * With respect to inp_flags2 here, let's let any new acks wake
6912 * us up here. Since we are not pacing (no pacing timer), output
6913 * can happen so we should let it. If it's a Rack timer, then any inbound
6914 * packet probably won't change the sending (we will be blocked)
6915 * but it may change the prr stats, so letting it in (the defaults set
6916 * at the start of this block) is good enough.
6918 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
6919 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout),
6921 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
6922 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
6924 /* No timer starting */
6926 if (SEQ_GT(tp->snd_max, tp->snd_una)) {
6927 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?",
6928 tp, rack, tot_len_this_send, cts, slot, hpts_timeout);
6932 rack->rc_tmr_stopped = 0;
6934 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__);
6938 * RACK Timer, here we simply do logging and housekeeping.
6939 * The normal rack_output() function will call the
6940 * appropriate thing to check if we need to do a RACK retransmit.
6941 * We return 1, saying don't proceed with rack_output only
6942 * when all timers have been stopped (destroyed PCB?).
6945 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6948 * This timer simply provides an internal trigger to send out data.
6949 * The check_recovery_mode call will see if there are needed
6950 * retransmissions, if so we will enter fast-recovery. The output
6951 * call may or may not do the same thing depending on sysctl settings.
6954 struct rack_sendmap *rsm;
6956 counter_u64_add(rack_to_tot, 1);
6957 if (rack->r_state && (rack->r_state != tp->t_state))
6958 rack_set_state(tp, rack);
6959 rack->rc_on_min_to = 0;
6960 rsm = rack_check_recovery_mode(tp, cts);
6961 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm);
6963 rack->r_ctl.rc_resend = rsm;
6964 rack->r_timer_override = 1;
6965 if (rack->use_rack_rr) {
6967 * Don't accumulate extra pacing delay;
6968 * we are allowing the rack timer to
6969 * override pacing, i.e. rrr takes precedence
6970 * if the pacing interval is longer than the rrr
6971 * time (in other words we get the min of the pacing
6972 * time versus the rrr pacing time).
6974 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
6977 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK;
6979 /* restart a timer and return 1 */
6980 rack_start_hpts_timer(rack, tp, cts,
6990 rack_adjust_orig_mlen(struct rack_sendmap *rsm)
6993 if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) {
6995 * The trailing space changed, mbufs can grow
6996 * at the tail but they can't shrink from
6997 * it, KASSERT that. Adjust the orig_m_len to
6998 * compensate for this change.
7000 KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)),
7001 ("mbuf:%p rsm:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n",
7004 (intmax_t)M_TRAILINGROOM(rsm->m),
7008 rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m));
7009 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
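/*
 * Worked example of the adjustment above (illustrative numbers): if the
 * saved orig_t_space was 512 bytes and M_TRAILINGROOM() now reports 256,
 * then 256 bytes were appended at the tail of the mbuf, so orig_m_len is
 * grown by 256 and the saved trailing space is refreshed to 256.
 */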
7011 if (rsm->m->m_len < rsm->orig_m_len) {
7013 * Mbuf shrank, trimmed off the top by an ack; our offset changes.
7016 KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)),
7017 ("mbuf:%p len:%u rsm:%p oml:%u soff:%u\n",
7018 rsm->m, rsm->m->m_len,
7019 rsm, rsm->orig_m_len,
7021 if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len))
7022 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len);
7025 rsm->orig_m_len = rsm->m->m_len;
7027 } else if (rsm->m->m_len > rsm->orig_m_len) {
7028 panic("rsm:%p m:%p m_len grew outside of t_space compensation",
7035 rack_setup_offset_for_rsm(struct tcp_rack *rack, struct rack_sendmap *src_rsm, struct rack_sendmap *rsm)
7041 ((src_rsm->orig_m_len != src_rsm->m->m_len) ||
7042 (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) {
7043 /* Fix up the orig_m_len and possibly the mbuf offset */
7044 rack_adjust_orig_mlen(src_rsm);
7047 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start);
7048 while (soff >= m->m_len) {
7049 /* Move out past this mbuf */
7052 KASSERT((m != NULL),
7053 ("rsm:%p nrsm:%p hit at soff:%u null m",
7054 src_rsm, rsm, soff));
7056 /* This should *not* happen which is why there is a kassert */
7057 src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
7058 (src_rsm->r_start - rack->rc_tp->snd_una),
7060 src_rsm->orig_m_len = src_rsm->m->m_len;
7061 src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m);
7062 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
7063 (rsm->r_start - rack->rc_tp->snd_una),
7065 rsm->orig_m_len = rsm->m->m_len;
7066 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
7072 rsm->orig_m_len = m->m_len;
7073 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
7076 static __inline void
7077 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm,
7078 struct rack_sendmap *rsm, uint32_t start)
7082 nrsm->r_start = start;
7083 nrsm->r_end = rsm->r_end;
7084 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
7085 nrsm->r_flags = rsm->r_flags;
7086 nrsm->r_dupack = rsm->r_dupack;
7087 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed;
7088 nrsm->r_rtr_bytes = 0;
7089 nrsm->r_fas = rsm->r_fas;
7090 nrsm->r_bas = rsm->r_bas;
7091 rsm->r_end = nrsm->r_start;
7092 nrsm->r_just_ret = rsm->r_just_ret;
7093 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
7094 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
7096 /* Now if we have SYN flag we keep it on the left edge */
7097 if (nrsm->r_flags & RACK_HAS_SYN)
7098 nrsm->r_flags &= ~RACK_HAS_SYN;
7099 /* Now if we have a FIN flag we keep it on the right edge */
7100 if (rsm->r_flags & RACK_HAS_FIN)
7101 rsm->r_flags &= ~RACK_HAS_FIN;
7102 /* Push bit must go to the right edge as well */
7103 if (rsm->r_flags & RACK_HAD_PUSH)
7104 rsm->r_flags &= ~RACK_HAD_PUSH;
7105 /* Clone over the state of the hw_tls flag */
7106 nrsm->r_hw_tls = rsm->r_hw_tls;
7108 * Now we need to find nrsm's new location in the mbuf chain.
7109 * We basically calculate a new offset, which is soff +
7110 * how much is left in the original rsm. Then we walk out the mbuf
7111 * chain to find the right position; it may be the same mbuf
7114 KASSERT(((rsm->m != NULL) ||
7115 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))),
7116 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack));
7118 rack_setup_offset_for_rsm(rack, rsm, nrsm);
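/*
 * Illustrative example of the clone/split above: an rsm covering
 * [1000, 3000) cloned at start = 2000 leaves rsm as [1000, 2000) and
 * nrsm as [2000, 3000). A SYN stays with the left edge (rsm), while a
 * FIN or PUSH is kept only on the right edge (nrsm), matching the flag
 * adjustments done earlier in this function.
 */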
7121 static struct rack_sendmap *
7122 rack_merge_rsm(struct tcp_rack *rack,
7123 struct rack_sendmap *l_rsm,
7124 struct rack_sendmap *r_rsm)
7127 * We are merging two ack'd RSM's,
7128 * the l_rsm is on the left (lower seq
7129 * values) and the r_rsm is on the right
7130 * (higher seq value). The simplest way
7131 * to merge these is to move the right
7132 * one into the left. I don't think there
7133 * is any reason we need to try to find
7134 * the oldest (or last oldest retransmitted).
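 *
 * For illustration: merging l_rsm [1000, 2000) with r_rsm [2000, 3000)
 * leaves l_rsm covering [1000, 3000); r_rsm is then removed from the
 * tqhash and freed below, and l_rsm is flagged RACK_MERGED.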
7136 rack_log_map_chg(rack->rc_tp, rack, NULL,
7137 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__);
7138 l_rsm->r_end = r_rsm->r_end;
7139 if (l_rsm->r_dupack < r_rsm->r_dupack)
7140 l_rsm->r_dupack = r_rsm->r_dupack;
7141 if (r_rsm->r_rtr_bytes)
7142 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes;
7143 if (r_rsm->r_in_tmap) {
7144 /* This really should not happen */
7145 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext);
7146 r_rsm->r_in_tmap = 0;
7150 if (r_rsm->r_flags & RACK_HAS_FIN)
7151 l_rsm->r_flags |= RACK_HAS_FIN;
7152 if (r_rsm->r_flags & RACK_TLP)
7153 l_rsm->r_flags |= RACK_TLP;
7154 if (r_rsm->r_flags & RACK_RWND_COLLAPSED)
7155 l_rsm->r_flags |= RACK_RWND_COLLAPSED;
7156 if ((r_rsm->r_flags & RACK_APP_LIMITED) &&
7157 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) {
7159 * If both are app-limited then let the
7160 * free lower the count. If right is app
7161 * limited and left is not, transfer.
7163 l_rsm->r_flags |= RACK_APP_LIMITED;
7164 r_rsm->r_flags &= ~RACK_APP_LIMITED;
7165 if (r_rsm == rack->r_ctl.rc_first_appl)
7166 rack->r_ctl.rc_first_appl = l_rsm;
7168 tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE);
7170 * We keep the largest value, which is the newest
7171 * send. We do this in case a segment that is
7172 * joined together and not part of a GP estimate
7173 * later gets expanded into the GP estimate.
7175 * We prohibit the merging of unlike kinds i.e.
7176 * all pieces that are in the GP estimate can be
7177 * merged and all pieces that are not in a GP estimate
7178 * can be merged, but not dissimilar pieces. Combine
7179 * this with taking the highest here and we should
7180 * be ok unless of course the client reneges. Then
7183 if(l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] <
7184 r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) {
7185 l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)];
7188 * When merging two RSM's we also need to consider the ack time and keep
7189 * newest. If the ack gets merged into a measurement then that is the
7190 * one we will want to be using.
7192 if(l_rsm->r_ack_arrival < r_rsm->r_ack_arrival)
7193 l_rsm->r_ack_arrival = r_rsm->r_ack_arrival;
7195 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) {
7196 /* Transfer the split limit to the map we free */
7197 r_rsm->r_limit_type = l_rsm->r_limit_type;
7198 l_rsm->r_limit_type = 0;
7200 rack_free(rack, r_rsm);
7201 l_rsm->r_flags |= RACK_MERGED;
7206 * TLP Timer, here we simply set up what segment we want to
7207 * have the TLP expire on, the normal rack_output() will then send it out.
7210 * We return 1, saying don't proceed with rack_output only
7211 * when all timers have been stopped (destroyed PCB?).
7214 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp)
7219 struct rack_sendmap *rsm = NULL;
7220 int insret __diagused;
7221 struct socket *so = tptosocket(tp);
7223 uint32_t out, avail;
7224 int collapsed_win = 0;
7226 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
7227 /* It's not time yet */
7230 if (ctf_progress_timeout_check(tp, true)) {
7231 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
7232 return (-ETIMEDOUT); /* tcp_drop() */
7235 * A TLP timer has expired. We have been idle for 2 rtts. So we now
7236 * need to figure out how to force a full MSS segment out.
7238 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL);
7239 rack->r_ctl.retran_during_recovery = 0;
7240 rack->r_ctl.dsack_byte_cnt = 0;
7241 counter_u64_add(rack_tlp_tot, 1);
7242 if (rack->r_state && (rack->r_state != tp->t_state))
7243 rack_set_state(tp, rack);
7244 avail = sbavail(&so->so_snd);
7245 out = tp->snd_max - tp->snd_una;
7246 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) {
7247 /* special case, we need a retransmission */
7251 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) {
7252 rack->r_ctl.dsack_persist--;
7253 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
7254 rack->r_ctl.num_dsack = 0;
7256 rack_log_dsack_event(rack, 1, __LINE__, 0, 0);
7258 if ((tp->t_flags & TF_GPUTINPROG) &&
7259 (rack->r_ctl.rc_tlp_cnt_out == 1)) {
7261 * If this is the second in a row
7262 * TLP and we are doing a measurement
7263 * it's time to abandon the measurement.
7264 * Something is likely broken on
7265 * the client's network and measuring a
7266 * broken network does us no good.
7268 tp->t_flags &= ~TF_GPUTINPROG;
7269 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
7270 rack->r_ctl.rc_gp_srtt /*flex1*/,
7272 0, 0, 18, __LINE__, NULL, 0);
7275 * Check our send oldest always settings, and if
7276 * there is an oldest to send jump to the need_retran.
7278 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0))
7282 /* New data is available */
7284 if (amm > ctf_fixed_maxseg(tp)) {
7285 amm = ctf_fixed_maxseg(tp);
7286 if ((amm + out) > tp->snd_wnd) {
7287 /* We are rwnd limited */
7290 } else if (amm < ctf_fixed_maxseg(tp)) {
7291 /* not enough to fill an MTU */
7294 if (IN_FASTRECOVERY(tp->t_flags)) {
7296 if (rack->rack_no_prr == 0) {
7297 if (out + amm <= tp->snd_wnd) {
7298 rack->r_ctl.rc_prr_sndcnt = amm;
7299 rack->r_ctl.rc_tlp_new_data = amm;
7300 rack_log_to_prr(rack, 4, 0, __LINE__);
7305 /* Set the send-new override */
7306 if (out + amm <= tp->snd_wnd)
7307 rack->r_ctl.rc_tlp_new_data = amm;
7311 rack->r_ctl.rc_tlpsend = NULL;
7312 counter_u64_add(rack_tlp_newdata, 1);
7317 * Ok we need to arrange the last un-acked segment to be re-sent, or
7318 * optionally the first un-acked segment.
7320 if (collapsed_win == 0) {
7321 if (rack_always_send_oldest)
7322 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
7324 rsm = tqhash_max(rack->r_ctl.tqh);
7325 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) {
7326 rsm = rack_find_high_nonack(rack, rsm);
7331 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
7337 * We had a collapsed window, let's find
7338 * the point before the collapse.
7340 if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una))
7341 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1));
7343 rsm = tqhash_min(rack->r_ctl.tqh);
7350 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) {
7352 * We need to split this, the last segment, in two.
7354 struct rack_sendmap *nrsm;
7356 nrsm = rack_alloc_full_limit(rack);
7359 * No memory to split, we will just exit and punt
7360 * off to the RXT timer.
7364 rack_clone_rsm(rack, nrsm, rsm,
7365 (rsm->r_end - ctf_fixed_maxseg(tp)));
7366 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
7368 (void)tqhash_insert(rack->r_ctl.tqh, nrsm);
7370 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
7371 panic("Insert in rb tree of %p fails ret:%d rack:%p rsm:%p",
7372 nrsm, insret, rack, rsm);
7375 if (rsm->r_in_tmap) {
7376 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
7377 nrsm->r_in_tmap = 1;
7381 rack->r_ctl.rc_tlpsend = rsm;
7383 /* Make sure output path knows we are doing a TLP */
7385 rack->r_timer_override = 1;
7386 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
7389 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
7394 * Delayed ack Timer, here we simply need to set up the
7395 * ACK_NOW flag and remove the DELACK flag. From there
7396 * the output routine will send the ack out.
7398 * We only return 1, saying don't proceed, if all timers
7399 * are stopped (destroyed PCB?).
7402 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
7405 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL);
7406 tp->t_flags &= ~TF_DELACK;
7407 tp->t_flags |= TF_ACKNOW;
7408 KMOD_TCPSTAT_INC(tcps_delack);
7409 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
7414 * Persists timer, here we simply send the
7415 * same thing as a keepalive will:
7416 * the one byte send.
7418 * We only return 1, saying don't proceed, if all timers
7419 * are stopped (destroyed PCB?).
7422 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
7424 struct tcptemp *t_template;
7427 if (rack->rc_in_persist == 0)
7429 if (ctf_progress_timeout_check(tp, false)) {
7430 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
7431 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
7432 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
7433 return (-ETIMEDOUT); /* tcp_drop() */
7436 * Persistence timer into zero window. Force a byte to be output, if possible.
7439 KMOD_TCPSTAT_INC(tcps_persisttimeo);
7441 * Hack: if the peer is dead/unreachable, we do not time out if the
7442 * window is closed. After a full backoff, drop the connection if
7443 * the idle time (no responses to probes) reaches the maximum
7444 * backoff that we would use if retransmitting.
7446 if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
7447 (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
7448 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) {
7449 KMOD_TCPSTAT_INC(tcps_persistdrop);
7450 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
7451 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
7452 retval = -ETIMEDOUT; /* tcp_drop() */
7455 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) &&
7456 tp->snd_una == tp->snd_max)
7457 rack_exit_persist(tp, rack, cts);
7458 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT;
7460 * If the user has closed the socket then drop a persisting
7461 * connection after a much reduced timeout.
7463 if (tp->t_state > TCPS_CLOSE_WAIT &&
7464 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
7465 KMOD_TCPSTAT_INC(tcps_persistdrop);
7466 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
7467 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
7468 retval = -ETIMEDOUT; /* tcp_drop() */
7471 t_template = tcpip_maketemplate(rack->rc_inp);
7473 /* only set it if we were answered */
7474 if (rack->forced_ack == 0) {
7475 rack->forced_ack = 1;
7476 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
7478 rack->probe_not_answered = 1;
7479 counter_u64_add(rack_persists_loss, 1);
7480 rack->r_ctl.persist_lost_ends++;
7482 counter_u64_add(rack_persists_sends, 1);
7483 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1);
7484 tcp_respond(tp, t_template->tt_ipgen,
7485 &t_template->tt_t, (struct mbuf *)NULL,
7486 tp->rcv_nxt, tp->snd_una - 1, 0);
7487 /* This sends an ack */
7488 if (tp->t_flags & TF_DELACK)
7489 tp->t_flags &= ~TF_DELACK;
7490 free(t_template, M_TEMP);
7492 if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
7495 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL);
7496 rack_start_hpts_timer(rack, tp, cts,
7502 * If a keepalive goes off, we had no other timers
7503 * happening. We always return 1 here since this
7504 * routine either drops the connection or sends
7505 * out a segment via tcp_respond().
7508 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
7510 struct tcptemp *t_template;
7511 struct inpcb *inp = tptoinpcb(tp);
7513 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
7514 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL);
7516 * Keep-alive timer went off; send something or drop connection if
7517 * idle for too long.
7519 KMOD_TCPSTAT_INC(tcps_keeptimeo);
7520 if (tp->t_state < TCPS_ESTABLISHED)
7522 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
7523 tp->t_state <= TCPS_CLOSING) {
7524 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
7527 * Send a packet designed to force a response if the peer is
7528 * up and reachable: either an ACK if the connection is
7529 * still alive, or an RST if the peer has closed the
7530 * connection due to timeout or reboot. Using sequence
7531 * number tp->snd_una-1 causes the transmitted zero-length
7532 * segment to lie outside the receive window; by the
7533 * protocol spec, this requires the correspondent TCP to respond.
7536 KMOD_TCPSTAT_INC(tcps_keepprobe);
7537 t_template = tcpip_maketemplate(inp);
7539 if (rack->forced_ack == 0) {
7540 rack->forced_ack = 1;
7541 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
7543 rack->probe_not_answered = 1;
7545 tcp_respond(tp, t_template->tt_ipgen,
7546 &t_template->tt_t, (struct mbuf *)NULL,
7547 tp->rcv_nxt, tp->snd_una - 1, 0);
7548 free(t_template, M_TEMP);
7551 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
7554 KMOD_TCPSTAT_INC(tcps_keepdrops);
7555 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
7556 return (-ETIMEDOUT); /* tcp_drop() */
7560 * Retransmit helper function, clear up all the ack
7561 * flags and take care of important bookkeeping.
7564 rack_remxt_tmr(struct tcpcb *tp)
7567 * The retransmit timer went off, all sack'd blocks must be un-acked.
7570 struct rack_sendmap *rsm, *trsm = NULL;
7571 struct tcp_rack *rack;
7573 rack = (struct tcp_rack *)tp->t_fb_ptr;
7574 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__);
7575 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL);
7576 if (rack->r_state && (rack->r_state != tp->t_state))
7577 rack_set_state(tp, rack);
7579 * Ideally we would like to be able to
7580 * mark SACK-PASS on anything not acked here.
7582 * However, if we do that we would burst out
7583 * all that data 1ms apart. This would be unwise,
7584 * so for now we will just let the normal rxt timer
7585 * and tlp timer take care of it.
7587 * Also we really need to stick them back in sequence
7588 * order. This way we send in the proper order and any
7589 * sacks that come floating in will "re-ack" the data.
7590 * To do this we zap the tmap with an INIT and then
7591 * walk through and place every rsm in the RB tree
7592 * back in its seq ordered place.
7594 TAILQ_INIT(&rack->r_ctl.rc_tmap);
7596 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) {
7598 if (rack_verbose_logging)
7599 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
7600 /* We must re-add it back to the tlist */
7602 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7604 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
7608 if (rsm->r_flags & RACK_ACKED)
7609 rsm->r_flags |= RACK_WAS_ACKED;
7610 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED);
7611 rsm->r_flags |= RACK_MUST_RXT;
7613 /* Clear the count (we just un-acked them) */
7614 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una;
7615 rack->r_ctl.rc_sacked = 0;
7616 rack->r_ctl.rc_sacklast = NULL;
7617 rack->r_ctl.rc_agg_delayed = 0;
7619 rack->r_ctl.rc_agg_early = 0;
7621 /* Clear the tlp rtx mark */
7622 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh);
7623 if (rack->r_ctl.rc_resend != NULL)
7624 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT;
7625 rack->r_ctl.rc_prr_sndcnt = 0;
7626 rack_log_to_prr(rack, 6, 0, __LINE__);
7627 rack->r_timer_override = 1;
7628 if ((((tp->t_flags & TF_SACK_PERMIT) == 0)
7629 #ifdef TCP_SAD_DETECTION
7630 || (rack->sack_attack_disable != 0)
7632 ) && ((tp->t_flags & TF_SENTFIN) == 0)) {
7634 * For non-sack customers new data
7635 * needs to go out as retransmits until
7636 * we retransmit up to snd_max.
7638 rack->r_must_retran = 1;
7639 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp,
7640 rack->r_ctl.rc_sacked);
7642 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
7646 rack_convert_rtts(struct tcpcb *tp)
7648 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC);
7649 tp->t_rxtcur = RACK_REXMTVAL(tp);
7650 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
7651 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop);
7653 if (tp->t_rxtcur > rack_rto_max) {
7654 tp->t_rxtcur = rack_rto_max;
7659 rack_cc_conn_init(struct tcpcb *tp)
7661 struct tcp_rack *rack;
7664 rack = (struct tcp_rack *)tp->t_fb_ptr;
7668 * Now convert to rack's internal format,
7671 if ((srtt == 0) && (tp->t_srtt != 0))
7672 rack_convert_rtts(tp);
7674 * We want a chance to stay in slowstart as
7675 * we create a connection. TCP spec says that
7676 * initially ssthresh is infinite. For our
7677 * purposes that is the snd_wnd.
7679 if (tp->snd_ssthresh < tp->snd_wnd) {
7680 tp->snd_ssthresh = tp->snd_wnd;
7683 * We also want to assure an IW worth of
7684 * data can get in flight.
7686 if (rc_init_window(rack) < tp->snd_cwnd)
7687 tp->snd_cwnd = rc_init_window(rack);
7691 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
7692 * we will set up to retransmit the lowest seq number outstanding.
7695 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
7697 struct inpcb *inp = tptoinpcb(tp);
7702 if ((tp->t_flags & TF_GPUTINPROG) &&
7705 * We have had a second timeout;
7706 * measurements on successive rxt's are not profitable.
7707 * It is unlikely to be of any use (the network is
7708 * broken or the client went away).
7710 tp->t_flags &= ~TF_GPUTINPROG;
7711 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
7712 rack->r_ctl.rc_gp_srtt /*flex1*/,
7714 0, 0, 18, __LINE__, NULL, 0);
7716 if (ctf_progress_timeout_check(tp, false)) {
7717 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
7718 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
7719 return (-ETIMEDOUT); /* tcp_drop() */
7721 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT;
7722 rack->r_ctl.retran_during_recovery = 0;
7723 rack->rc_ack_required = 1;
7724 rack->r_ctl.dsack_byte_cnt = 0;
7725 if (IN_FASTRECOVERY(tp->t_flags))
7726 tp->t_flags |= TF_WASFRECOVERY;
7728 tp->t_flags &= ~TF_WASFRECOVERY;
7729 if (IN_CONGRECOVERY(tp->t_flags))
7730 tp->t_flags |= TF_WASCRECOVERY;
7732 tp->t_flags &= ~TF_WASCRECOVERY;
7733 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
7734 (tp->snd_una == tp->snd_max)) {
7735 /* Nothing outstanding .. nothing to do */
7738 if (rack->r_ctl.dsack_persist) {
7739 rack->r_ctl.dsack_persist--;
7740 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
7741 rack->r_ctl.num_dsack = 0;
7743 rack_log_dsack_event(rack, 1, __LINE__, 0, 0);
7746 * Rack can only run one timer at a time, so we cannot
7747 * run a KEEPINIT (gating SYN sending) and a retransmit
7748 * timer for the SYN. So if we are in a front state and
7749 * have a KEEPINIT timer we need to check the first transmit
7750 * against now to see if we have exceeded the KEEPINIT time.
7753 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
7754 (TP_KEEPINIT(tp) != 0)) {
7755 struct rack_sendmap *rsm;
7757 rsm = tqhash_min(rack->r_ctl.tqh);
7759 /* Ok we have something outstanding to test keepinit with */
7760 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) &&
7761 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) {
7762 /* We have exceeded the KEEPINIT time */
7763 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
7769 * Retransmission timer went off. Message has not been acked within
7770 * retransmit interval. Back off to a longer retransmit interval
7771 * and retransmit one segment.
7774 if ((rack->r_ctl.rc_resend == NULL) ||
7775 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) {
7777 * If the rwnd collapsed on
7778 * the one we are retransmitting
7779 * it does not count against the retransmit count.
7784 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) {
7785 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
7787 tp->t_rxtshift = TCP_MAXRXTSHIFT;
7788 KMOD_TCPSTAT_INC(tcps_timeoutdrop);
7789 /* XXXGL: previously t_softerror was casted to uint16_t */
7790 MPASS(tp->t_softerror >= 0);
7791 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT;
7792 goto out; /* tcp_drop() */
7794 if (tp->t_state == TCPS_SYN_SENT) {
7796 * If the SYN was retransmitted, indicate CWND to be limited
7797 * to 1 segment in cc_conn_init().
7800 } else if (tp->t_rxtshift == 1) {
7802 * first retransmit; record ssthresh and cwnd so they can be
7803 * recovered if this turns out to be a "bad" retransmit. A
7804 * retransmit is considered "bad" if an ACK for this segment
7805 * is received within RTT/2 interval; the assumption here is
7806 * that the ACK was already in flight. See "On Estimating
7807 * End-to-End Network Path Properties" by Allman and Paxson for more details.
7810 tp->snd_cwnd_prev = tp->snd_cwnd;
7811 tp->snd_ssthresh_prev = tp->snd_ssthresh;
7812 tp->snd_recover_prev = tp->snd_recover;
7813 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2);
7814 tp->t_flags |= TF_PREVVALID;
7815 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0)
7816 tp->t_flags &= ~TF_PREVVALID;
7817 KMOD_TCPSTAT_INC(tcps_rexmttimeo);
7818 if ((tp->t_state == TCPS_SYN_SENT) ||
7819 (tp->t_state == TCPS_SYN_RECEIVED))
7820 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift];
7822 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift];
7824 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt,
7825 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop);
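	/*
	 * Rough worked example of the backoff above (illustrative numbers,
	 * assuming the usual doubling tcp_backoff[] table and that
	 * rack_rto_min is below the computed value): with t_srtt = 50000 us,
	 * t_rttvar = 10000 us and t_rxtshift = 1, rexmt becomes
	 * (50000 + 4 * 10000) * 2 = 180000 us, which RACK_TCPT_RANGESET()
	 * then keeps within [max(rack_rto_min, rexmt), rack_rto_max],
	 * taking rack->r_ctl.timer_slop into account.
	 */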
7827 * We enter the path for PLMTUD if connection is established or, if
7828 * connection is in FIN_WAIT_1 status; the reason for the latter is that if the
7829 * amount of data we send is very small, we could send it in a couple
7830 * of packets and proceed straight to FIN. In that case we won't
7831 * catch ESTABLISHED state.
7834 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false;
7838 if (((V_tcp_pmtud_blackhole_detect == 1) ||
7839 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) ||
7840 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) &&
7841 ((tp->t_state == TCPS_ESTABLISHED) ||
7842 (tp->t_state == TCPS_FIN_WAIT_1))) {
7844 * Idea here is that at each stage of mtu probe (usually,
7845 * 1448 -> 1188 -> 524) should be given 2 chances to recover
7846 * before further clamping down. 'tp->t_rxtshift % 2 == 0'
7847 * should take care of that.
7849 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) ==
7850 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) &&
7851 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
7852 tp->t_rxtshift % 2 == 0)) {
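			/*
			 * In other words, the clamp below is applied only on
			 * even backoff shifts in [2, 6) -- i.e. on the 2nd and
			 * 4th retransmission timeouts -- so each probe stage
			 * gets two chances before the next reduction, and the
			 * rxtshift >= 6 case further down restores the saved
			 * MSS if the lower value did not help.
			 */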
7854 * Enter Path MTU Black-hole Detection mechanism: -
7855 * Disable Path MTU Discovery (IP "DF" bit). -
7856 * Reduce MTU to lower value than what we negotiated with the peer.
7859 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
7860 /* Record that we may have found a black hole. */
7861 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
7862 /* Keep track of previous MSS. */
7863 tp->t_pmtud_saved_maxseg = tp->t_maxseg;
7867 * Reduce the MSS to blackhole value or to the
7868 * default in an attempt to retransmit.
7872 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
7873 /* Use the sysctl tuneable blackhole MSS. */
7874 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
7875 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
7876 } else if (isipv6) {
7877 /* Use the default MSS. */
7878 tp->t_maxseg = V_tcp_v6mssdflt;
7880 * Disable Path MTU Discovery when we switch to minmss.
7883 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
7884 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
7887 #if defined(INET6) && defined(INET)
7891 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
7892 /* Use the sysctl tuneable blackhole MSS. */
7893 tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
7894 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
7896 /* Use the default MSS. */
7897 tp->t_maxseg = V_tcp_mssdflt;
7899 * Disable Path MTU Discovery when we switch to minmss.
7902 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
7903 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
7908 * If further retransmissions are still unsuccessful
7909 * with a lowered MTU, maybe this isn't a blackhole
7910 * and we restore the previous MSS and blackhole
7911 * detection flags. The limit '6' is determined by
7912 * giving each probe stage (1448, 1188, 524) 2
7913 * chances to recover.
7915 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
7916 (tp->t_rxtshift >= 6)) {
7917 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
7918 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
7919 tp->t_maxseg = tp->t_pmtud_saved_maxseg;
7920 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed);
7925 * Disable RFC1323 and SACK if we haven't got any response to
7926 * our third SYN to work-around some broken terminal servers
7927 * (most of which have hopefully been retired) that have bad VJ
7928 * header compression code which trashes TCP segments containing
7929 * unknown-to-them TCP options.
7931 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
7932 (tp->t_rxtshift == 3))
7933 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
7935 * If we backed off this far, our srtt estimate is probably bogus.
7936 * Clobber it so we'll take the next rtt measurement as our srtt;
7937 * move the current srtt into rttvar to keep the current retransmit times until then.
7940 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
7942 if ((inp->inp_vflag & INP_IPV6) != 0)
7947 tp->t_rttvar += tp->t_srtt;
7950 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
7951 tp->snd_recover = tp->snd_max;
7952 tp->t_flags |= TF_ACKNOW;
7954 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__);
7960 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp)
7963 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK);
7965 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
7966 (tp->t_flags & TF_GPUTINPROG)) {
7968 * We have a goodput in progress
7969 * and we have entered a late state.
7970 * Do we have enough data in the sb
7971 * to handle the GPUT request?
7975 bytes = tp->gput_ack - tp->gput_seq;
7976 if (SEQ_GT(tp->gput_seq, tp->snd_una))
7977 bytes += tp->gput_seq - tp->snd_una;
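		/*
		 * In effect 'bytes' is the distance from gput_seq to gput_ack,
		 * extended back to snd_una when gput_seq is still ahead of
		 * snd_una; if the send buffer holds less than that, the
		 * goodput measurement can never complete and is cancelled
		 * below.
		 */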
7978 if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
7980 * There are not enough bytes in the socket
7981 * buffer that have been sent to cover this
7982 * measurement. Cancel it.
7984 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
7985 rack->r_ctl.rc_gp_srtt /*flex1*/,
7987 0, 0, 18, __LINE__, NULL, 0);
7988 tp->t_flags &= ~TF_GPUTINPROG;
7994 if (tp->t_state == TCPS_LISTEN) {
7995 /* no timers on listen sockets */
7996 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)
8000 if ((timers & PACE_TMR_RACK) &&
8001 rack->rc_on_min_to) {
8003 * For the rack timer when we
8004 * are on a min-timeout (which means rrr_conf = 3)
8005 * we don't want to check the timer. It may
8006 * be going off for a pace and that's ok, we
8007 * want to send the retransmit (if it's ready).
8009 * If its on a normal rack timer (non-min) then
8010 * we will check if its expired.
8012 goto skip_time_check;
8014 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
8017 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
8019 rack_log_to_processing(rack, cts, ret, 0);
8022 if (hpts_calling == 0) {
8024 * A user send or queued mbuf (sack) has called us? We
8025 * return 0 and let the pacing guards
8026 * deal with it if they should or
8027 * should not cause a send.
8030 rack_log_to_processing(rack, cts, ret, 0);
8034 * Ok, our timer went off early and we are not pacing; false
8035 * alarm, go back to sleep. We make sure the no-sack wakeup
8036 * is off since we no longer have the PKT_OUTPUT flag set.
8039 rack->rc_inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
8041 left = rack->r_ctl.rc_timer_exp - cts;
8042 tcp_hpts_insert(tptoinpcb(tp), HPTS_MS_TO_SLOTS(left));
8043 rack_log_to_processing(rack, cts, ret, left);
8047 rack->rc_tmr_stopped = 0;
8048 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK;
8049 if (timers & PACE_TMR_DELACK) {
8050 ret = rack_timeout_delack(tp, rack, cts);
8051 } else if (timers & PACE_TMR_RACK) {
8052 rack->r_ctl.rc_tlp_rxt_last_time = cts;
8053 rack->r_fast_output = 0;
8054 ret = rack_timeout_rack(tp, rack, cts);
8055 } else if (timers & PACE_TMR_TLP) {
8056 rack->r_ctl.rc_tlp_rxt_last_time = cts;
8057 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp);
8058 } else if (timers & PACE_TMR_RXT) {
8059 rack->r_ctl.rc_tlp_rxt_last_time = cts;
8060 rack->r_fast_output = 0;
8061 ret = rack_timeout_rxt(tp, rack, cts);
8062 } else if (timers & PACE_TMR_PERSIT) {
8063 ret = rack_timeout_persist(tp, rack, cts);
8064 } else if (timers & PACE_TMR_KEEP) {
8065 ret = rack_timeout_keepalive(tp, rack, cts);
8067 rack_log_to_processing(rack, cts, ret, timers);
8072 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line)
8075 uint32_t us_cts, flags_on_entry;
8076 uint8_t hpts_removed = 0;
8078 flags_on_entry = rack->r_ctl.rc_hpts_flags;
8079 us_cts = tcp_get_usecs(&tv);
8080 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
8081 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) ||
8082 ((tp->snd_max - tp->snd_una) == 0))) {
8083 tcp_hpts_remove(rack->rc_inp);
8085 /* If we were not delayed cancel out the flag. */
8086 if ((tp->snd_max - tp->snd_una) == 0)
8087 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
8088 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
8090 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
8091 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
8092 if (tcp_in_hpts(rack->rc_inp) &&
8093 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) {
8095 * Canceling timers when we have no output being
8096 * paced. We also must remove ourselves from the hpts.
8099 tcp_hpts_remove(rack->rc_inp);
8102 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK);
8104 if (hpts_removed == 0)
8105 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
8109 rack_stopall(struct tcpcb *tp)
8111 struct tcp_rack *rack;
8112 rack = (struct tcp_rack *)tp->t_fb_ptr;
8113 rack->t_timers_stopped = 1;
8118 rack_stop_all_timers(struct tcpcb *tp, struct tcp_rack *rack)
8121 * Assure no timers are running.
8123 if (tcp_timer_active(tp, TT_PERSIST)) {
8124 /* We enter in persists, set the flag appropriately */
8125 rack->rc_in_persist = 1;
8127 if (tcp_in_hpts(rack->rc_inp)) {
8128 tcp_hpts_remove(rack->rc_inp);
8133 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
8134 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag, int segsiz)
8139 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8141 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
8142 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
8143 rsm->r_flags |= RACK_OVERMAX;
8145 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) {
8146 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
8147 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
8149 idx = rsm->r_rtr_cnt - 1;
8150 rsm->r_tim_lastsent[idx] = ts;
8152 * Here we don't add in the len of send, since it's already
8153 * in snd_una <-> snd_max.
8155 rsm->r_fas = ctf_flight_size(rack->rc_tp,
8156 rack->r_ctl.rc_sacked);
8157 if (rsm->r_flags & RACK_ACKED) {
8158 /* Probably MTU discovery messing with us */
8159 rsm->r_flags &= ~RACK_ACKED;
8160 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
8162 if (rsm->r_in_tmap) {
8163 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8166 /* Let's check whether it really is in the GP window or not */
8167 rack_mark_in_gp_win(tp, rsm);
8168 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8170 rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz);
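	/*
	 * The ceiling division above counts how many segsiz-sized segments
	 * this block spans, e.g. 1449 bytes with a 1448 byte segsiz counts
	 * as 2.
	 */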
8171 /* Take off the must retransmit flag, if it's on */
8172 if (rsm->r_flags & RACK_MUST_RXT) {
8173 if (rack->r_must_retran)
8174 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
8175 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
8177 * We have retransmitted all we need. Clear
8178 * any must retransmit flags.
8180 rack->r_must_retran = 0;
8181 rack->r_ctl.rc_out_at_rto = 0;
8183 rsm->r_flags &= ~RACK_MUST_RXT;
8185 /* Remove any collapsed flag */
8186 rsm->r_flags &= ~RACK_RWND_COLLAPSED;
8187 if (rsm->r_flags & RACK_SACK_PASSED) {
8188 /* We have retransmitted due to the SACK pass */
8189 rsm->r_flags &= ~RACK_SACK_PASSED;
8190 rsm->r_flags |= RACK_WAS_SACKPASS;
8195 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
8196 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag, int segsiz)
8199 * We (re-)transmitted starting at rsm->r_start for some length
8200 * (possibly less than r_end).
8202 struct rack_sendmap *nrsm;
8203 int insret __diagused;
8208 c_end = rsm->r_start + len;
8209 if (SEQ_GEQ(c_end, rsm->r_end)) {
8211 * We retransmitted the whole piece, or more than the whole piece,
8212 * slopping into the next rsm.
8214 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz);
8215 if (c_end == rsm->r_end) {
8221 /* Hangs over the end, return what's left */
8222 act_len = rsm->r_end - rsm->r_start;
8223 *lenp = (len - act_len);
8224 return (rsm->r_end);
8226 /* We don't get out of this block. */
8229 * Here we retransmitted less than the whole thing which means we
8230 * have to split this into what was transmitted and what was not.
8232 nrsm = rack_alloc_full_limit(rack);
8235 * We can't get memory, so let's not proceed.
8241 * So here we are going to take the original rsm and make it what we
8242 * retransmitted. nrsm will be the tail portion we did not
8243 * retransmit. For example say the chunk was 1, 11 (10 bytes). And
8244 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to
8245 * 1, 6 and the new piece will be 6, 11.
8247 rack_clone_rsm(rack, nrsm, rsm, c_end);
8249 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
8251 (void)tqhash_insert(rack->r_ctl.tqh, nrsm);
8253 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
8254 panic("Insert in rb tree of %p fails ret:%d rack:%p rsm:%p",
8255 nrsm, insret, rack, rsm);
8258 if (rsm->r_in_tmap) {
8259 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8260 nrsm->r_in_tmap = 1;
8262 rsm->r_flags &= (~RACK_HAS_FIN);
8263 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz);
8264 /* Log a split of rsm into rsm and nrsm */
8265 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
8271 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
8272 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts,
8273 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb,
8274 uint32_t s_moff, int hw_tls, int segsiz)
8276 struct tcp_rack *rack;
8277 struct rack_sendmap *rsm, *nrsm;
8278 int insret __diagused;
8280 register uint32_t snd_max, snd_una;
8283 * Add to the RACK log of packets in flight or retransmitted. If
8284 * there is a TS option we will use the TS echoed, if not we will
8287 * Retransmissions will increment the count and move the ts to its
8288 * proper place. Note that if options do not include TS's then we
8289 * won't be able to effectively use the ACK for an RTT on a retran.
8291 * Notes about r_start and r_end. Lets consider a send starting at
8292 * sequence 1 for 10 bytes. In such an example the r_start would be
8293 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
8294 * This means that r_end is actually the first sequence for the next send.
8299 * If err is set what do we do XXXrrs? should we not add the thing?
8300 * -- i.e. return if err != 0 or should we pretend we sent it? --
8301 * i.e. proceed with add ** do this for now.
8303 INP_WLOCK_ASSERT(tptoinpcb(tp));
8306 * We don't log errors -- we could but snd_max does not
8307 * advance in this case either.
8311 if (th_flags & TH_RST) {
8313 * We don't log resets and we return immediately from here.
8318 rack = (struct tcp_rack *)tp->t_fb_ptr;
8319 snd_una = tp->snd_una;
8320 snd_max = tp->snd_max;
8321 if (th_flags & (TH_SYN | TH_FIN)) {
8323 * The call to rack_log_output is made before bumping
8324 * snd_max. This means we can record one extra byte on a SYN
8325 * or FIN if seq_out is adding more on and a FIN is present
8326 * (and we are not resending).
8328 if ((th_flags & TH_SYN) && (seq_out == tp->iss))
8330 if (th_flags & TH_FIN)
8332 if (SEQ_LT(snd_max, tp->snd_nxt)) {
8334 * The add/update has not been done for the FIN/SYN yet.
8337 snd_max = tp->snd_nxt;
8340 if (SEQ_LEQ((seq_out + len), snd_una)) {
8341 /* Are we sending an old segment to induce an ack (keep-alive)? */
8344 if (SEQ_LT(seq_out, snd_una)) {
8345 /* huh? should we panic? */
8348 end = seq_out + len;
8350 if (SEQ_GEQ(end, seq_out))
8351 len = end - seq_out;
8356 /* We don't log zero window probes */
8359 if (IN_FASTRECOVERY(tp->t_flags)) {
8360 rack->r_ctl.rc_prr_out += len;
8362 /* First question is it a retransmission or new? */
8363 if (seq_out == snd_max) {
8365 rack_chk_http_and_hybrid_on_out(rack, seq_out, len, cts);
8367 rsm = rack_alloc(rack);
8370 * Hmm out of memory and the tcb got destroyed while
8375 if (th_flags & TH_FIN) {
8376 rsm->r_flags = RACK_HAS_FIN|add_flag;
8378 rsm->r_flags = add_flag;
8382 rsm->r_tim_lastsent[0] = cts;
8384 rsm->r_rtr_bytes = 0;
8385 if (th_flags & TH_SYN) {
8386 /* The data space is one beyond snd_una */
8387 rsm->r_flags |= RACK_HAS_SYN;
8389 rsm->r_start = seq_out;
8390 rsm->r_end = rsm->r_start + len;
8391 rack_mark_in_gp_win(tp, rsm);
8394 * save off the mbuf location that
8395 * sndmbuf_noadv returned (which is
8396 * where we started copying from)..
8401 * Here we do add in the len of send, since its not yet
8402 * reflected in snd_una <-> snd_max
8404 rsm->r_fas = (ctf_flight_size(rack->rc_tp,
8405 rack->r_ctl.rc_sacked) +
8406 (rsm->r_end - rsm->r_start));
8407 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */
8409 if (rsm->m->m_len <= rsm->soff) {
8411 * XXXrrs Question, will this happen?
8413 * If sbsndptr is set at the correct place
8414 * then s_moff should always be somewhere
8415 * within rsm->m. But if the sbsndptr was
8416 * off then that won't be true. If it occurs
8417 * we need to walk out to the correct location.
8422 while (lm->m_len <= rsm->soff) {
8423 rsm->soff -= lm->m_len;
8425 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u",
8426 __func__, rack, s_moff, s_mb, rsm->soff));
8430 rsm->orig_m_len = rsm->m->m_len;
8431 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
8433 rsm->orig_m_len = 0;
8434 rsm->orig_t_space = 0;
8436 rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz);
8437 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8439 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__);
8441 (void)tqhash_insert(rack->r_ctl.tqh, rsm);
8443 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) {
8444 panic("Insert in rb tree of %p fails ret:%d rack:%p rsm:%p",
8445 nrsm, insret, rack, rsm);
8448 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8451 * Special case detection, is there just a single
8452 * packet outstanding when we are not in recovery?
8454 * If this is true mark it so.
8456 if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
8457 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) {
8458 struct rack_sendmap *prsm;
8460 prsm = tqhash_prev(rack->r_ctl.tqh, rsm);
8462 prsm->r_one_out_nr = 1;
8467 * If we reach here it's a retransmission and we need to find it.
8470 if (hintrsm && (hintrsm->r_start == seq_out)) {
8474 /* No hints sorry */
8477 if ((rsm) && (rsm->r_start == seq_out)) {
8478 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz);
8485 /* Ok, it was not the last pointer, go through it the hard way. */
8487 rsm = tqhash_find(rack->r_ctl.tqh, seq_out);
8489 if (rsm->r_start == seq_out) {
8490 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz);
8497 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) {
8498 /* Transmitted within this piece */
8500 * Ok we must split off the front and then let the
8501 * update do the rest
8503 nrsm = rack_alloc_full_limit(rack);
8505 rack_update_rsm(tp, rack, rsm, cts, add_flag, segsiz);
8509 * copy rsm to nrsm and then trim the front of rsm
8510 * to not include this part.
8512 rack_clone_rsm(rack, nrsm, rsm, seq_out);
8513 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
8515 (void)tqhash_insert(rack->r_ctl.tqh, nrsm);
8517 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
8518 panic("Insert in rb tree of %p fails ret:%d rack:%p rsm:%p",
8519 nrsm, insret, rack, rsm);
8522 if (rsm->r_in_tmap) {
8523 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8524 nrsm->r_in_tmap = 1;
8526 rsm->r_flags &= (~RACK_HAS_FIN);
8527 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag, segsiz);
8535 * Hmm, not found in the map; did they retransmit both old and on into the new?
8538 if (seq_out == tp->snd_max) {
8540 } else if (SEQ_LT(seq_out, tp->snd_max)) {
8542 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n",
8543 seq_out, len, tp->snd_una, tp->snd_max);
8544 printf("Starting Dump of all rack entries\n");
8545 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) {
8546 printf("rsm:%p start:%u end:%u\n",
8547 rsm, rsm->r_start, rsm->r_end);
8549 printf("Dump complete\n");
8550 panic("seq_out not found rack:%p tp:%p",
8556 * Hmm beyond sndmax? (only if we are using the new rtt-pack
8559 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p",
8560 seq_out, len, tp->snd_max, tp);
8566 * Record one of the RTT updates from an ack into
8567 * our sample structure.
8571 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt,
8572 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt)
8574 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
8575 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
8576 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
8578 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
8579 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
8580 rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
8582 if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
8583 if (us_rtt < rack->r_ctl.rc_gp_lowrtt)
8584 rack->r_ctl.rc_gp_lowrtt = us_rtt;
8585 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd)
8586 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
8588 if ((confidence == 1) &&
8590 (rsm->r_just_ret) ||
8591 (rsm->r_one_out_nr &&
8592 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) {
8594 * If the rsm had a just-return
8595 * hit on it then we can't trust the
8596 * rtt measurement for buffer determination.
8597 * Note that a confidence of 2 indicates
8598 * SACK'd, which overrides the r_just_ret or
8599 * the r_one_out_nr. If it was a CUM-ACK and
8600 * we had only two outstanding, but get an
8601 * ack for only 1, then that also lowers our confidence.
8606 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
8607 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) {
8608 if (rack->r_ctl.rack_rs.confidence == 0) {
8610 * We take anything with no current confidence
8613 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
8614 rack->r_ctl.rack_rs.confidence = confidence;
8615 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
8616 } else if (confidence != 0) {
8618 * Once we have a confident number,
8619 * we can update it with a smaller
8620 * value since this confident number
8621 * may include the DSACK time until
8622 * the next segment (the second one) arrived.
8624 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
8625 rack->r_ctl.rack_rs.confidence = confidence;
8626 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
8629 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
8630 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
8631 rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
8632 rack->r_ctl.rack_rs.rs_rtt_cnt++;
8636 * Collect new round-trip time estimate
8637 * and update averages and current timeout.
8640 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp)
8645 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY)
8646 /* No valid sample */
8648 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) {
8649 /* We are to use the lowest RTT seen in a single ack */
8650 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
8651 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) {
8652 /* We are to use the highest RTT seen in a single ack */
8653 rtt = rack->r_ctl.rack_rs.rs_rtt_highest;
8654 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) {
8655 /* We are to use the average RTT seen in a single ack */
8656 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot /
8657 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt);
8660 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method);
8666 if (rack->rc_gp_rtt_set == 0) {
8668 * With no RTT we have to accept
8669 * even one we are not confident of.
8671 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt;
8672 rack->rc_gp_rtt_set = 1;
8673 } else if (rack->r_ctl.rack_rs.confidence) {
8674 /* update the running gp srtt */
8675 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8);
8676 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8;
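		/*
		 * i.e. rc_gp_srtt = 7/8 * rc_gp_srtt + 1/8 * new sample, the
		 * same 1/8-gain EWMA style used for the main srtt further
		 * below.
		 */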
8678 if (rack->r_ctl.rack_rs.confidence) {
8680 * record the low and high for highly buffered path computation,
8681 * we only do this if we are confident (not a retransmission).
8683 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) {
8684 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
8686 if (rack->rc_highly_buffered == 0) {
8688 * Currently once we declare a path as
8689 * highly buffered there is no going
8690 * back, which may be a problem...
8692 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) {
8693 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt,
8694 rack->r_ctl.rc_highest_us_rtt,
8695 rack->r_ctl.rc_lowest_us_rtt,
8697 rack->rc_highly_buffered = 1;
8701 if ((rack->r_ctl.rack_rs.confidence) ||
8702 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) {
8704 * If we are highly confident of it <or> it was
8705 * never retransmitted we accept it as the last us_rtt.
8707 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
8708 /* The lowest rtt can be set if it was not retransmitted */
8709 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) {
8710 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
8711 if (rack->r_ctl.rc_lowest_us_rtt == 0)
8712 rack->r_ctl.rc_lowest_us_rtt = 1;
8715 rack = (struct tcp_rack *)tp->t_fb_ptr;
8716 if (tp->t_srtt != 0) {
8718 * We keep a simple srtt in microseconds, like our rtt
8719 * measurement. We don't need to do any tricks with shifting
8720 * etc. Instead we just add in 1/8th of the new measurement
8721 * and subtract out 1/8 of the old srtt. We do the same with
8722 * the variance after finding the absolute value of the
8723 * difference between this sample and the current srtt.
8725 delta = tp->t_srtt - rtt;
8726 /* Take off 1/8th of the current sRTT */
8727 tp->t_srtt -= (tp->t_srtt >> 3);
8728 /* Add in 1/8th of the new RTT just measured */
8729 tp->t_srtt += (rtt >> 3);
8730 if (tp->t_srtt <= 0)
8732 /* Now let's take the absolute value of the difference (delta) */
8735 /* Subtract out 1/8th */
8736 tp->t_rttvar -= (tp->t_rttvar >> 3);
8737 /* Add in 1/8th of the new variance we just saw */
8738 tp->t_rttvar += (delta >> 3);
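		/*
		 * Worked example of the two updates above (illustrative
		 * numbers): with t_srtt = 100000 us, t_rttvar = 20000 us and
		 * a new 60000 us sample, delta = 40000, so
		 *	t_srtt   = 100000 - 12500 + 7500 = 95000 us
		 *	t_rttvar = 20000  - 2500  + 5000 = 22500 us
		 */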
8739 if (tp->t_rttvar <= 0)
8743 * No rtt measurement yet - use the unsmoothed rtt. Set the
8744 * variance to half the rtt (so our first retransmit happens at 3 * rtt).
8748 tp->t_rttvar = rtt >> 1;
8750 rack->rc_srtt_measure_made = 1;
8751 KMOD_TCPSTAT_INC(tcps_rttupdated);
8752 if (tp->t_rttupdated < UCHAR_MAX)
8755 if (rack_stats_gets_ms_rtt == 0) {
8756 /* Send in the microsecond rtt used for rxt timeout purposes */
8757 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt));
8758 } else if (rack_stats_gets_ms_rtt == 1) {
8759 /* Send in the millisecond rtt used for rxt timeout purposes */
8763 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
8764 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
8765 } else if (rack_stats_gets_ms_rtt == 2) {
8766 /* Send in the millisecond rtt as close to the path RTT as we can get */
8770 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
8771 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
8773 /* Send in the microsecond rtt as close to the path RTT as we can get */
8774 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt));
8776 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt));
8779 * the retransmit should happen at rtt + 4 * rttvar. Because of the
8780 * way we do the smoothing, srtt and rttvar will each average +1/2
8781 * tick of bias. When we compute the retransmit timer, we want 1/2
8782 * tick of rounding and 1 extra tick because of +-1/2 tick
8783 * uncertainty in the firing of the timer. The bias will give us
8784 * exactly the 1.5 tick we need. But, because the bias is
8785 * statistical, we have to test that we don't drop below the minimum
8786 * feasible timer (which is 2 ticks).
8789 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
8790 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop);
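	/*
	 * Per the comment above, the value being set is in effect
	 * RTO ~= srtt + 4 * rttvar (via RACK_REXMTVAL()), kept no lower
	 * than max(rack_rto_min, rtt + 2) and no higher than rack_rto_max,
	 * with the configured timer slop factored in by RACK_TCPT_RANGESET().
	 */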
8791 rack_log_rtt_sample(rack, rtt);
8792 tp->t_softerror = 0;
8797 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts)
8800 * Apply the inbound us-rtt at us_cts to the min filter.
8804 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
8805 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt,
8807 if (old_rtt > us_rtt) {
8808 /* We just hit a new lower rtt time */
8809 rack_log_rtt_shrinks(rack, us_cts, old_rtt,
8810 __LINE__, RACK_RTTS_NEWRTT);
8812 * Only count it if it's lower than what we saw previously by more than rack_min_rtt_movement.
8815 if ((old_rtt - us_rtt) > rack_min_rtt_movement) {
8816 if (rack_probertt_lower_within &&
8817 rack->rc_gp_dyn_mul &&
8818 (rack->use_fixed_rate == 0) &&
8819 (rack->rc_always_pace)) {
8821 * We are seeing a new lower rtt very close
8822 * to the time that we would have entered probe-rtt.
8823 * This is probably due to the fact that a peer flow
8824 * has entered probe-rtt. Let's go in now too.
8828 val = rack_probertt_lower_within * rack_time_between_probertt;
8830 if ((rack->in_probe_rtt == 0) &&
8831 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) {
8832 rack_enter_probertt(rack, us_cts);
8835 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
8841 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
8842 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack)
8846 uint32_t t, len_acked;
8848 if ((rsm->r_flags & RACK_ACKED) ||
8849 (rsm->r_flags & RACK_WAS_ACKED))
8852 if (rsm->r_no_rtt_allowed) {
8856 if (ack_type == CUM_ACKED) {
8857 if (SEQ_GT(th_ack, rsm->r_end)) {
8858 len_acked = rsm->r_end - rsm->r_start;
8861 len_acked = th_ack - rsm->r_start;
8865 len_acked = rsm->r_end - rsm->r_start;
8868 if (rsm->r_rtr_cnt == 1) {
8870 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
8873 if (!tp->t_rttlow || tp->t_rttlow > t)
8875 if (!rack->r_ctl.rc_rack_min_rtt ||
8876 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
8877 rack->r_ctl.rc_rack_min_rtt = t;
8878 if (rack->r_ctl.rc_rack_min_rtt == 0) {
8879 rack->r_ctl.rc_rack_min_rtt = 1;
8882 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]))
8883 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8885 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8888 if (CC_ALGO(tp)->rttsample != NULL) {
8889 /* Kick the RTT to the CC */
8890 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas);
8892 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
8893 if (ack_type == SACKED) {
8894 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1);
8895 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt);
8898 * We need to setup what our confidence
8901 * If the rsm was app limited and it is
8902 * less than a mss in length (the end
8903 * of the send) then we have a gap. If we
8904 * were app limited but say we were sending
8905 * multiple MSS's then we are more confident
8908 * When we are not app-limited then we see if
8909 * the rsm is being included in the current
8910 * measurement, we tell this by the app_limited_needs_set
8913 * Note that being cwnd blocked is not applimited
8914 * as well as the pacing delay between packets which
8915 * are sending only 1 or 2 MSS's also will show up
8916 * in the RTT. We probably need to examine this algorithm
8917 * a bit more and enhance it to account for the delay
8918 * between rsm's. We could do that by saving off the
8919 * pacing delay of each rsm (in an rsm) and then
8920 * factoring that in somehow though for now I am
8925 if (rsm->r_flags & RACK_APP_LIMITED) {
8926 if (all && (len_acked <= ctf_fixed_maxseg(tp)))
8930 } else if (rack->app_limited_needs_set == 0) {
8935 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2);
8936 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt,
8937 calc_conf, rsm, rsm->r_rtr_cnt);
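/*
 * Summary of the confidence selection above (the exact calc_conf values
 * are assigned in the lines elided here): an app-limited rsm whose acked
 * length is under one MSS (the tail of a send) gets the lowest confidence,
 * an app-limited rsm covering multiple MSS's gets more, and an rsm that is
 * not app-limited and is part of the current measurement gets the most;
 * that value is what is passed to tcp_rack_xmit_timer() in the call above.
 */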
8939 if ((rsm->r_flags & RACK_TLP) &&
8940 (!IN_FASTRECOVERY(tp->t_flags))) {
8941 /* Segment was a TLP and our retrans matched */
8942 if (rack->r_ctl.rc_tlp_cwnd_reduce) {
8943 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
8946 if ((rack->r_ctl.rc_rack_tmit_time == 0) ||
8947 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
8948 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) {
8949 /* New more recent rack_tmit_time */
8950 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
8951 if (rack->r_ctl.rc_rack_tmit_time == 0)
8952 rack->r_ctl.rc_rack_tmit_time = 1;
8953 rack->rc_rack_rtt = t;
8958 * We clear the soft/rxtshift since we got an ack.
8959 * There is no assurance we will call the commit() function
8960 * so we need to clear these to avoid incorrect handling.
8963 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
8964 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
8965 tp->t_softerror = 0;
8966 if (to && (to->to_flags & TOF_TS) &&
8967 (ack_type == CUM_ACKED) &&
8969 ((rsm->r_flags & RACK_OVERMAX) == 0)) {
8971 * Now which timestamp does it match? In this block the ACK
8972 * must be coming from a previous transmission.
8974 for (i = 0; i < rsm->r_rtr_cnt; i++) {
8975 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) {
8976 t = cts - (uint32_t)rsm->r_tim_lastsent[i];
8979 if (CC_ALGO(tp)->rttsample != NULL) {
8981 * Kick the RTT to the CC, here
8982 * we lie a bit in that we know the
8983 * retransmission is correct even though
8984 * we retransmitted. This is because
8985 * we match the timestamps.
8987 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i]))
8988 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i];
8990 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i];
8991 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas);
8993 if ((i + 1) < rsm->r_rtr_cnt) {
8995 * The peer ack'd from our previous
8996 * transmission. We have a spurious
8997 * retransmission and thus we don't
8998 * want to update our rack_rtt.
9000 * Hmm should there be a CC revert here?
9005 if (!tp->t_rttlow || tp->t_rttlow > t)
9007 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
9008 rack->r_ctl.rc_rack_min_rtt = t;
9009 if (rack->r_ctl.rc_rack_min_rtt == 0) {
9010 rack->r_ctl.rc_rack_min_rtt = 1;
9013 if ((rack->r_ctl.rc_rack_tmit_time == 0) ||
9014 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
9015 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) {
9016 /* New more recent rack_tmit_time */
9017 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
9018 if (rack->r_ctl.rc_rack_tmit_time == 0)
9019 rack->r_ctl.rc_rack_tmit_time = 1;
9020 rack->rc_rack_rtt = t;
9022 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3);
9023 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm,
9028 /* If we are logging log out the sendmap */
9029 if (tcp_bblogging_on(rack->rc_tp)) {
9030 for (i = 0; i < rsm->r_rtr_cnt; i++) {
9031 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr);
9037 * Ok, it's a SACK block that we retransmitted, or a Windows
9038 * machine without timestamps. We can tell nothing from the
9039 * time-stamp since it's not there, or the time the peer last
9040 * received a segment that moved forward its cum-ack point.
9043 i = rsm->r_rtr_cnt - 1;
9044 t = cts - (uint32_t)rsm->r_tim_lastsent[i];
9047 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
9049 * We retransmitted and the ack came back in less
9050 * than the smallest rtt we have observed. We most
9051 * likely did an improper retransmit as outlined in
9052 * 6.2 Step 2 point 2 in the rack-draft so we
9053 * don't want to update our rack_rtt. We in
9054 * theory (in future) might want to think about reverting our
9055 * cwnd state but we won't for now.
9058 } else if (rack->r_ctl.rc_rack_min_rtt) {
9060 * We retransmitted it and the retransmit did the
9063 if (!rack->r_ctl.rc_rack_min_rtt ||
9064 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
9065 rack->r_ctl.rc_rack_min_rtt = t;
9066 if (rack->r_ctl.rc_rack_min_rtt == 0) {
9067 rack->r_ctl.rc_rack_min_rtt = 1;
9070 if ((rack->r_ctl.rc_rack_tmit_time == 0) ||
9071 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
9072 (uint32_t)rsm->r_tim_lastsent[i]))) {
9073 /* New more recent rack_tmit_time */
9074 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i];
9075 if (rack->r_ctl.rc_rack_tmit_time == 0)
9076 rack->r_ctl.rc_rack_tmit_time = 1;
9077 rack->rc_rack_rtt = t;
9086 * Mark the SACK_PASSED flag on all entries prior to rsm send wise.
9089 rack_log_sack_passed(struct tcpcb *tp,
9090 struct tcp_rack *rack, struct rack_sendmap *rsm)
9092 struct rack_sendmap *nrsm;
9095 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap,
9096 rack_head, r_tnext) {
9098 /* Skip the original segment; it is the one being acked */
9101 if (nrsm->r_flags & RACK_ACKED) {
9103 * Skip ack'd segments, though we
9104 * should not see these, since tmap
9105 * should not have ack'd segments.
9109 if (nrsm->r_flags & RACK_RWND_COLLAPSED) {
9111 * If the peer dropped the rwnd on
9112 * these then we don't worry about them.
9116 if (nrsm->r_flags & RACK_SACK_PASSED) {
9118 * We found one that is already marked
9119 * passed, we have been here before and
9120 * so all others below this are marked.
9124 nrsm->r_flags |= RACK_SACK_PASSED;
9125 nrsm->r_flags &= ~RACK_WAS_SACKPASS;
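/*
 * Worked example of the walk above: if the tmap holds A, B, C, D in send
 * order and D is newly SACKed, the walk starts at D, skips it (it is the
 * segment being acked) and marks C, B and A with RACK_SACK_PASSED. On a
 * later SACK the scan stops at the first entry already marked, so earlier
 * entries are never re-walked.
 */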
9130 rack_need_set_test(struct tcpcb *tp,
9131 struct tcp_rack *rack,
9132 struct rack_sendmap *rsm,
9137 struct rack_sendmap *s_rsm;
9139 if ((tp->t_flags & TF_GPUTINPROG) &&
9140 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
9142 * We were app limited, and this ack
9143 * butts up or goes beyond the point where we want
9144 * to start our next measurement. We need
9145 * to record the new gput_ts as here and
9146 * possibly update the start sequence.
9150 if (rsm->r_rtr_cnt > 1) {
9152 * This is a retransmit, can we
9153 * really make any assessment at this
9154 * point? We are not really sure of
9155 * the timestamp, is it this or the
9156 * previous transmission?
9158 * Lets wait for something better that
9159 * is not retransmitted.
9165 rack->app_limited_needs_set = 0;
9166 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
9167 /* Do we start at a new end? */
9168 if ((use_which == RACK_USE_BEG) &&
9169 SEQ_GEQ(rsm->r_start, tp->gput_seq)) {
9171 * When we get an ACK that just eats
9172 * up some of the rsm, we set RACK_USE_BEG
9173 * since what's at r_start (i.e. th_ack)
9174 * is left unacked and thats where the
9175 * measurement now starts.
9177 tp->gput_seq = rsm->r_start;
9179 if ((use_which == RACK_USE_END) &&
9180 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
9182 * We use the end when the cumack
9183 * is moving forward and completely
9184 * deleting the rsm passed so basically
9185 * r_end holds th_ack.
9187 * For SACK's we also want to use the end
9188 * since this piece just got sacked and
9189 * we want to target anything after that
9190 * in our measurement.
9192 tp->gput_seq = rsm->r_end;
9194 if (use_which == RACK_USE_END_OR_THACK) {
9196 * special case for ack moving forward,
9197 * not a sack, we need to move all the
9198 * way up to where this ack cum-ack moves
9201 if (SEQ_GT(th_ack, rsm->r_end))
9202 tp->gput_seq = th_ack;
9204 tp->gput_seq = rsm->r_end;
9206 if (SEQ_LT(tp->gput_seq, tp->snd_max))
9207 s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
9211 * Pick up the correct send time if we can; the rsm passed in
9212 * may be equal to s_rsm if the RACK_USE_BEG was set. For the other
9213 * two cases (RACK_USE_THACK or RACK_USE_END) most likely we will
9214 * find a different seq i.e. the next send up.
9216 * If that has not been sent, s_rsm will be NULL and we must
9217 * arrange it so this function will get called again by setting
9218 * app_limited_needs_set.
9221 rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0];
9223 /* If we hit here we have to have *not* sent tp->gput_seq */
9224 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0];
9225 /* Set it up so we will go through here again */
9226 rack->app_limited_needs_set = 1;
9228 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) {
9230 * We moved beyond this guy's range, re-calculate
9231 * the new end point.
9233 if (rack->rc_gp_filled == 0) {
9234 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
9236 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
9240 * We are moving the goal post, we may be able to clear the
9241 * measure_saw_probe_rtt flag.
9243 if ((rack->in_probe_rtt == 0) &&
9244 (rack->measure_saw_probe_rtt) &&
9245 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
9246 rack->measure_saw_probe_rtt = 0;
9247 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts,
9249 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) |
9250 (uint64_t)rack->r_ctl.rc_gp_output_ts),
9252 if (rack->rc_gp_filled &&
9253 ((tp->gput_ack - tp->gput_seq) <
9254 max(rc_init_window(rack), (MIN_GP_WIN *
9255 ctf_fixed_maxseg(tp))))) {
9256 uint32_t ideal_amount;
9258 ideal_amount = rack_get_measure_window(tp, rack);
9259 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) {
9261 * There is no sense in continuing this measurement
9262 * because it's too small to gain us anything we
9263 * trust. Skip it and that way we can start a new
9264 * measurement quicker.
9266 tp->t_flags &= ~TF_GPUTINPROG;
9267 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq,
9269 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) |
9270 (uint64_t)rack->r_ctl.rc_gp_output_ts),
9271 6, __LINE__, NULL, 0);
9274 * Reset the window further out.
9276 tp->gput_ack = tp->gput_seq + ideal_amount;
9279 rack_tend_gp_marks(tp, rack);
9280 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm);
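/*
 * Example of how the starting point is chosen above: when a cum-ack only
 * partially consumes an rsm the caller uses RACK_USE_BEG, so gput_seq
 * becomes rsm->r_start (the first still-unacked byte). When the rsm is
 * completely consumed, or was SACKed, RACK_USE_END / RACK_USE_END_OR_THACK
 * move gput_seq to rsm->r_end (or th_ack if that is further along), and
 * gput_ack is then re-extended when needed so the measurement still spans
 * a useful window (based on rc_init_window()/MIN_GP_WIN when no goodput
 * estimate exists yet).
 */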
9285 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm)
9287 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) {
9288 /* Behind our TLP definition or right at */
9291 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) {
9292 /* The start is beyond or right at our end of TLP definition */
9295 /* It has to be a sub-part of the original TLP recorded */
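/*
 * Equivalent formulation of the test above: the rsm is "inside" the
 * declared TLP block exactly when the two ranges overlap, i.e.
 *	!(SEQ_LT(rsm->r_end, last_tlp_acked_start) ||
 *	  SEQ_GT(rsm->r_start, last_tlp_acked_end))
 * Anything entirely before or entirely after the recorded start/end is
 * rejected; everything else is treated as a sub-part of the original TLP.
 */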
9302 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack,
9303 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts,
9305 int *moved_two, uint32_t segsiz)
9307 uint32_t start, end, changed = 0;
9308 struct rack_sendmap stack_map;
9309 struct rack_sendmap *rsm, *nrsm, *prev, *next;
9310 int insret __diagused;
9311 int32_t used_ref = 1;
9313 #ifdef TCP_SAD_DETECTION
9315 int first_time_through = 1;
9318 int can_use_hookery = 0;
9320 start = sack->start;
9324 #ifdef TCP_SAD_DETECTION
9326 * There are a strange number of proxies and middle boxes in the world
9327 * that seem to cut up segments on different boundaries. This gets us
9328 * smaller sacks that are still ok in terms of it being an attacker.
9329 * We use the base segsiz to calculate an allowable smallness but
9330 * also enforce a min on the segsiz in case it is an attacker playing
9331 * games with MSS. So basically if the sack arrives and it is
9332 * larger than a worst-case 960 bytes, we don't classify the guy
9335 allow_segsiz = max(segsiz, 1200) * sad_seg_size_per;
9336 allow_segsiz /= 1000;
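/*
 * Worked example of allow_segsiz (using the per-mille setting that yields
 * the worst-case 960 bytes mentioned above, i.e. sad_seg_size_per = 800):
 *	segsiz = 1448 -> max(1448, 1200) * 800 / 1000 = 1158 bytes
 *	segsiz =  536 -> max(536, 1200) * 800 / 1000  =  960 bytes
 * so SACK blocks at least this large never enter the suspicious-size check
 * below, regardless of how small an MSS the peer negotiated.
 */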
9339 if ((rsm == NULL) ||
9340 (SEQ_LT(end, rsm->r_start)) ||
9341 (SEQ_GEQ(start, rsm->r_end)) ||
9342 (SEQ_LT(start, rsm->r_start))) {
9344 * We are not in the right spot,
9345 * find the correct spot in the tree.
9348 rsm = tqhash_find(rack->r_ctl.tqh, start);
9355 #ifdef TCP_SAD_DETECTION
9356 /* Now we must check for suspicious activity */
9357 if ((first_time_through == 1) &&
9358 ((end - start) < min((rsm->r_end - rsm->r_start), allow_segsiz)) &&
9359 ((rsm->r_flags & RACK_PMTU_CHG) == 0) &&
9360 ((rsm->r_flags & RACK_TLP) == 0)) {
9362 * It's less than a full MSS, or the segment being acked
9363 * this should only happen if the rsm in question had the
9364 * r_just_ret flag set <and> the end matches the end of
9367 * Note we do not look at segments that have had TLP's on
9368 * them since we can get un-reported rwnd collapses that
9369 * basically we TLP on and then we get back a sack block
9370 * that goes from the start to only a small way.
9376 if (SEQ_GEQ(end, rsm->r_end)) {
9377 if (rsm->r_just_ret == 1) {
9378 /* This was at the end of a send which is ok */
9381 /* A bit harder was it the end of our segment */
9384 len = (rsm->r_end - rsm->r_start);
9385 segs = len / segsiz;
9387 if ((segs + (rsm->r_end - start)) == len) {
9389 * So this last bit was the
9390 * end of our send if we cut it
9391 * up into segsiz pieces so its ok.
9399 * This guy is doing something suspicious
9400 * let's start detection.
9402 if (rack->rc_suspicious == 0) {
9403 tcp_trace_point(rack->rc_tp, TCP_TP_SAD_SUSPECT);
9404 counter_u64_add(rack_sack_attacks_suspect, 1);
9405 rack->rc_suspicious = 1;
9406 rack_log_sad(rack, 4);
9407 if (tcp_bblogging_on(rack->rc_tp)) {
9408 union tcp_log_stackspecific log;
9411 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
9412 log.u_bbr.flex1 = end;
9413 log.u_bbr.flex2 = start;
9414 log.u_bbr.flex3 = rsm->r_end;
9415 log.u_bbr.flex4 = rsm->r_start;
9416 log.u_bbr.flex5 = segsiz;
9417 log.u_bbr.flex6 = rsm->r_fas;
9418 log.u_bbr.flex7 = rsm->r_bas;
9419 log.u_bbr.flex8 = 5;
9420 log.u_bbr.pkts_out = rsm->r_flags;
9421 log.u_bbr.bbr_state = rack->rc_suspicious;
9422 log.u_bbr.bbr_substate = rsm->r_just_ret;
9423 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
9424 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
9425 TCP_LOG_EVENTP(rack->rc_tp, NULL,
9426 &rack->rc_inp->inp_socket->so_rcv,
9427 &rack->rc_inp->inp_socket->so_snd,
9428 TCP_SAD_DETECTION, 0,
9429 0, &log, false, &tv);
9432 /* You lose some ack count every time you sack
9433 * a small bit that is not butting up to the end of
9434 * what we have sent. This is because we never
9435 * send small bits unless its the end of the sb.
9436 * Anyone sending a sack that is not at the end
9437 * is thus very very suspicious.
9439 loss = (segsiz/2) / (end - start);
9440 if (loss < rack->r_ctl.ack_count)
9441 rack->r_ctl.ack_count -= loss;
9443 rack->r_ctl.ack_count = 0;
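/*
 * Worked example of the penalty above: with segsiz = 1460 a SACK covering
 * only 100 bytes gives loss = (1460 / 2) / 100 = 7, so seven counts are
 * taken from ack_count, while a 700-byte block costs just 1. The smaller
 * the stray SACK, the faster ack_count is drained.
 */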
9446 first_time_through = 0;
9448 /* Ok we have an ACK for some piece of this rsm */
9449 if (rsm->r_start != start) {
9450 if ((rsm->r_flags & RACK_ACKED) == 0) {
9452 * Before any splitting or hookery is
9453 * done is it a TLP of interest i.e. rxt?
9455 if ((rsm->r_flags & RACK_TLP) &&
9456 (rsm->r_rtr_cnt > 1)) {
9458 * We are splitting a rxt TLP, check
9459 * if we need to save off the start/end
9461 if (rack->rc_last_tlp_acked_set &&
9462 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
9464 * We already turned this on since we are inside it;
9465 * the previous one was partially sacked, now we
9466 * are getting another one (maybe all of it).
9469 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
9471 * Lets make sure we have all of it though.
9473 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
9474 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9475 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9476 rack->r_ctl.last_tlp_acked_end);
9478 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
9479 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9480 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9481 rack->r_ctl.last_tlp_acked_end);
9484 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9485 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9486 rack->rc_last_tlp_past_cumack = 0;
9487 rack->rc_last_tlp_acked_set = 1;
9488 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
9492 * Need to split this in two pieces the before and after,
9493 * the before remains in the map, the after must be
9494 * added. In other words we have:
9495 * rsm |--------------|
9499 * and nrsm will be the sacked piece
9502 * But before we start down that path lets
9503 * see if the sack spans over on top of
9504 * the next guy and it is already sacked.
9508 * Hookery can only be used if the two entries
9509 * are in the same bucket and neither one of
9510 * them straddle the bucket line.
9512 next = tqhash_next(rack->r_ctl.tqh, rsm);
9514 (rsm->bindex == next->bindex) &&
9515 ((rsm->r_flags & RACK_STRADDLE) == 0) &&
9516 ((next->r_flags & RACK_STRADDLE) == 0) &&
9517 (rsm->r_flags & RACK_IN_GP_WIN) &&
9518 (next->r_flags & RACK_IN_GP_WIN))
9519 can_use_hookery = 1;
9521 (rsm->bindex == next->bindex) &&
9522 ((rsm->r_flags & RACK_STRADDLE) == 0) &&
9523 ((next->r_flags & RACK_STRADDLE) == 0) &&
9524 ((rsm->r_flags & RACK_IN_GP_WIN) == 0) &&
9525 ((next->r_flags & RACK_IN_GP_WIN) == 0))
9526 can_use_hookery = 1;
9528 can_use_hookery = 0;
9529 if (next && can_use_hookery &&
9530 (next->r_flags & RACK_ACKED) &&
9531 SEQ_GEQ(end, next->r_start)) {
9533 * So the next one is already acked, and
9534 * we can thus by hookery use our stack_map
9535 * to reflect the piece being sacked and
9536 * then adjust the two tree entries moving
9537 * the start and ends around. So we start like:
9538 * rsm |------------| (not-acked)
9539 * next |-----------| (acked)
9540 * sackblk |-------->
9541 * We want to end like so:
9542 * rsm |------| (not-acked)
9543 * next |-----------------| (acked)
9545 * Where nrsm is a temporary stack piece we
9546 * use to update all the gizmos.
9548 /* Copy up our fudge block */
9551 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
9552 /* Now adjust our tree blocks */
9554 next->r_start = start;
9555 rsm->r_flags |= RACK_SHUFFLED;
9556 next->r_flags |= RACK_SHUFFLED;
9557 /* Now we must adjust back where next->m is */
9558 rack_setup_offset_for_rsm(rack, rsm, next);
9560 * Which timestamp do we keep? It is rather
9561 * important in GP measurements to have the
9562 * accurate end of the send window.
9564 * We keep the largest value, which is the newest
9565 * send. We do this in case a segment that is
9566 * joined together and not part of a GP estimate
9567 * later gets expanded into the GP estimate.
9569 * We prohibit the merging of unlike kinds i.e.
9570 * all pieces that are in the GP estimate can be
9571 * merged and all pieces that are not in a GP estimate
9572 * can be merged, but not dissimilar pieces. Combine
9573 * this with taking the highest here and we should
9574 * be ok unless of course the client reneges. Then
9577 if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] <
9578 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)])
9579 next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)];
9581 * And we must keep the newest ack arrival time.
9583 if (next->r_ack_arrival <
9584 rack_to_usec_ts(&rack->r_ctl.act_rcv_time))
9585 next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
9588 /* We don't need to adjust rsm, it did not change */
9589 /* Clear out the dup ack count of the remainder */
9591 rsm->r_just_ret = 0;
9592 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
9593 /* Now lets make sure our fudge block is right */
9594 nrsm->r_start = start;
9595 /* Now lets update all the stats and such */
9596 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
9597 if (rack->app_limited_needs_set)
9598 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
9599 changed += (nrsm->r_end - nrsm->r_start);
9600 /* You get a count for acking a whole segment or more */
9601 if ((nrsm->r_end - nrsm->r_start) >= segsiz)
9602 rack->r_ctl.ack_count += ((nrsm->r_end - nrsm->r_start) / segsiz);
9603 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
9604 if (nrsm->r_flags & RACK_SACK_PASSED) {
9605 rack->r_ctl.rc_reorder_ts = cts;
9606 if (rack->r_ctl.rc_reorder_ts == 0)
9607 rack->r_ctl.rc_reorder_ts = 1;
9610 * Now we want to go up from rsm (the
9611 * one left un-acked) to the next one
9612 * in the tmap. We do this so when
9613 * we walk backwards we include marking
9614 * sack-passed on rsm (The one passed in
9615 * is skipped since it is generally called
9616 * on something sacked before removing it
9619 if (rsm->r_in_tmap) {
9620 nrsm = TAILQ_NEXT(rsm, r_tnext);
9622 * Now that we have the next
9623 * one walk backwards from there.
9625 if (nrsm && nrsm->r_in_tmap)
9626 rack_log_sack_passed(tp, rack, nrsm);
9628 /* Now are we done? */
9629 if (SEQ_LT(end, next->r_end) ||
9630 (end == next->r_end)) {
9631 /* Done with block */
9634 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__);
9635 counter_u64_add(rack_sack_used_next_merge, 1);
9636 /* Position for the next block */
9637 start = next->r_end;
9638 rsm = tqhash_next(rack->r_ctl.tqh, next);
9643 * We can't use any hookery here, so we
9644 * need to split the map. We enter like
9648 * We will add the new block nrsm and
9649 * that will be the new portion, and then
9650 * fall through after reseting rsm. So we
9651 * split and look like this:
9655 * We then fall through reseting
9656 * rsm to nrsm, so the next block
9659 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
9662 * failed XXXrrs what can we do but lose the sack
9667 counter_u64_add(rack_sack_splits, 1);
9668 rack_clone_rsm(rack, nrsm, rsm, start);
9670 rsm->r_just_ret = 0;
9672 (void)tqhash_insert(rack->r_ctl.tqh, nrsm);
9674 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
9675 panic("Insert in rb tree of %p fails ret:%d rack:%p rsm:%p",
9676 nrsm, insret, rack, rsm);
9679 if (rsm->r_in_tmap) {
9680 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
9681 nrsm->r_in_tmap = 1;
9683 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__);
9684 rsm->r_flags &= (~RACK_HAS_FIN);
9685 /* Position us to point to the new nrsm that starts the sack blk */
9689 /* Already sacked this piece */
9690 counter_u64_add(rack_sack_skipped_acked, 1);
9692 if (end == rsm->r_end) {
9693 /* Done with block */
9694 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
9696 } else if (SEQ_LT(end, rsm->r_end)) {
9697 /* A partial sack to a already sacked block */
9699 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
9703 * The end goes beyond this guy;
9704 * reposition the start to the
9708 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
9714 if (SEQ_GEQ(end, rsm->r_end)) {
9716 * The end of this block is either beyond this guy or right
9717 * at this guy. I.e.:
9723 if ((rsm->r_flags & RACK_ACKED) == 0) {
9725 * Is it a TLP of interest?
9727 if ((rsm->r_flags & RACK_TLP) &&
9728 (rsm->r_rtr_cnt > 1)) {
9730 * We are splitting a rxt TLP, check
9731 * if we need to save off the start/end
9733 if (rack->rc_last_tlp_acked_set &&
9734 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
9736 * We already turned this on since we are inside it;
9737 * the previous one was partially sacked, now we
9738 * are getting another one (maybe all of it).
9740 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
9742 * Lets make sure we have all of it though.
9744 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
9745 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9746 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9747 rack->r_ctl.last_tlp_acked_end);
9749 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
9750 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9751 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9752 rack->r_ctl.last_tlp_acked_end);
9755 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9756 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9757 rack->rc_last_tlp_past_cumack = 0;
9758 rack->rc_last_tlp_acked_set = 1;
9759 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
9762 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
9763 changed += (rsm->r_end - rsm->r_start);
9764 /* You get a count for acking a whole segment or more */
9765 if ((rsm->r_end - rsm->r_start) >= segsiz)
9766 rack->r_ctl.ack_count += ((rsm->r_end - rsm->r_start) / segsiz);
9767 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
9768 if (rsm->r_in_tmap) /* should be true */
9769 rack_log_sack_passed(tp, rack, rsm);
9770 /* Is Reordering occurring? */
9771 if (rsm->r_flags & RACK_SACK_PASSED) {
9772 rsm->r_flags &= ~RACK_SACK_PASSED;
9773 rack->r_ctl.rc_reorder_ts = cts;
9774 if (rack->r_ctl.rc_reorder_ts == 0)
9775 rack->r_ctl.rc_reorder_ts = 1;
9777 if (rack->app_limited_needs_set)
9778 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
9779 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
9780 rsm->r_flags |= RACK_ACKED;
9781 if (rsm->r_in_tmap) {
9782 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
9785 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__);
9787 counter_u64_add(rack_sack_skipped_acked, 1);
9790 if (end == rsm->r_end) {
9791 /* This block only - done, setup for next */
9795 * There is more not covered by this rsm, move on
9796 * to the next block in the RB tree.
9798 nrsm = tqhash_next(rack->r_ctl.tqh, rsm);
9806 * The end of this sack block is smaller than
9811 if ((rsm->r_flags & RACK_ACKED) == 0) {
9813 * Is it a TLP of interest?
9815 if ((rsm->r_flags & RACK_TLP) &&
9816 (rsm->r_rtr_cnt > 1)) {
9818 * We are splitting a rxt TLP, check
9819 * if we need to save off the start/end
9821 if (rack->rc_last_tlp_acked_set &&
9822 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
9824 * We already turned this on since we are inside it;
9825 * the previous one was partially sacked, now we
9826 * are getting another one (maybe all of it).
9828 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
9830 * Lets make sure we have all of it though.
9832 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
9833 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9834 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9835 rack->r_ctl.last_tlp_acked_end);
9837 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
9838 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9839 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9840 rack->r_ctl.last_tlp_acked_end);
9843 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9844 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9845 rack->rc_last_tlp_past_cumack = 0;
9846 rack->rc_last_tlp_acked_set = 1;
9847 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
9851 * Hookery can only be used if the two entries
9852 * are in the same bucket and neither one of
9853 * them straddle the bucket line.
9855 prev = tqhash_prev(rack->r_ctl.tqh, rsm);
9857 (rsm->bindex == prev->bindex) &&
9858 ((rsm->r_flags & RACK_STRADDLE) == 0) &&
9859 ((prev->r_flags & RACK_STRADDLE) == 0) &&
9860 (rsm->r_flags & RACK_IN_GP_WIN) &&
9861 (prev->r_flags & RACK_IN_GP_WIN))
9862 can_use_hookery = 1;
9864 (rsm->bindex == prev->bindex) &&
9865 ((rsm->r_flags & RACK_STRADDLE) == 0) &&
9866 ((prev->r_flags & RACK_STRADDLE) == 0) &&
9867 ((rsm->r_flags & RACK_IN_GP_WIN) == 0) &&
9868 ((prev->r_flags & RACK_IN_GP_WIN) == 0))
9869 can_use_hookery = 1;
9871 can_use_hookery = 0;
9873 if (prev && can_use_hookery &&
9874 (prev->r_flags & RACK_ACKED)) {
9876 * Goal, we want the right remainder of rsm to shrink
9877 * in place and span from (rsm->r_start = end) to rsm->r_end.
9878 * We want to expand prev to go all the way
9879 * to prev->r_end <- end.
9880 * so in the tree we have before:
9881 * prev |--------| (acked)
9882 * rsm |-------| (non-acked)
9884 * We churn it so we end up with
9885 * prev |----------| (acked)
9886 * rsm |-----| (non-acked)
9887 * nrsm |-| (temporary)
9889 * Note if either prev/rsm is a TLP we don't
9894 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
9897 rsm->r_flags |= RACK_SHUFFLED;
9898 prev->r_flags |= RACK_SHUFFLED;
9899 /* Now adjust nrsm (stack copy) to be
9900 * the one that is the small
9901 * piece that was "sacked".
9906 * Which timestamp do we keep? It is rather
9907 * important in GP measurements to have the
9908 * accurate end of the send window.
9910 * We keep the largest value, which is the newest
9911 * send. We do this in case a segment that is
9912 * joined together and not part of a GP estimate
9913 * later gets expanded into the GP estimate.
9915 * We prohibit the merging of unlike kinds i.e.
9916 * all pieces that are in the GP estimate can be
9917 * merged and all pieces that are not in a GP estimate
9918 * can be merged, but not dissimilar pieces. Combine
9919 * this with taking the highest here and we should
9920 * be ok unless of course the client reneges. Then
9923 if(prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] <
9924 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) {
9925 prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
9928 * And we must keep the newest ack arrival time.
9931 if(prev->r_ack_arrival <
9932 rack_to_usec_ts(&rack->r_ctl.act_rcv_time))
9933 prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
9935 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
9937 * Now that the rsm has had its start moved forward
9938 * lets go ahead and get its new place in the world.
9940 rack_setup_offset_for_rsm(rack, prev, rsm);
9942 * Now nrsm is our new little piece
9943 * that is acked (which was merged
9944 * to prev). Update the rtt and changed
9945 * based on that. Also check for reordering.
9947 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
9948 if (rack->app_limited_needs_set)
9949 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
9950 changed += (nrsm->r_end - nrsm->r_start);
9951 /* You get a count for acking a whole segment or more */
9952 if ((nrsm->r_end - nrsm->r_start) >= segsiz)
9953 rack->r_ctl.ack_count += ((nrsm->r_end - nrsm->r_start) / segsiz);
9955 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
9956 if (nrsm->r_flags & RACK_SACK_PASSED) {
9957 rack->r_ctl.rc_reorder_ts = cts;
9958 if (rack->r_ctl.rc_reorder_ts == 0)
9959 rack->r_ctl.rc_reorder_ts = 1;
9961 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__);
9963 counter_u64_add(rack_sack_used_prev_merge, 1);
9966 * This is the case where our previous
9967 * block is not acked either, so we must
9968 * split the block in two.
9970 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
9972 /* failed rrs what can we do but lose the sack info? */
9975 if ((rsm->r_flags & RACK_TLP) &&
9976 (rsm->r_rtr_cnt > 1)) {
9978 * We are splitting a rxt TLP, check
9979 * if we need to save off the start/end
9981 if (rack->rc_last_tlp_acked_set &&
9982 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
9984 * We already turned this on since this block is inside it;
9985 * the previous one was partially sacked, now we
9986 * are getting another one (maybe all of it).
9988 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
9990 * Lets make sure we have all of it though.
9992 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
9993 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9994 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9995 rack->r_ctl.last_tlp_acked_end);
9997 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
9998 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9999 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
10000 rack->r_ctl.last_tlp_acked_end);
10003 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
10004 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
10005 rack->rc_last_tlp_acked_set = 1;
10006 rack->rc_last_tlp_past_cumack = 0;
10007 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
10011 * In this case nrsm becomes
10012 * nrsm->r_start = end;
10013 * nrsm->r_end = rsm->r_end;
10014 * which is un-acked.
10016 * rsm->r_end = nrsm->r_start;
10017 * i.e. the remaining un-acked
10018 * piece is left on the left
10021 * So we start like this
10022 * rsm |----------| (not acked)
10024 * build it so we have
10025 * rsm |---| (acked)
10026 * nrsm |------| (not acked)
10028 counter_u64_add(rack_sack_splits, 1);
10029 rack_clone_rsm(rack, nrsm, rsm, end);
10031 rsm->r_flags &= (~RACK_HAS_FIN);
10032 rsm->r_just_ret = 0;
10034 (void)tqhash_insert(rack->r_ctl.tqh, nrsm);
10036 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
10037 panic("Insert in rb tree of %p fails ret:%d rack:%p rsm:%p",
10038 nrsm, insret, rack, rsm);
10041 if (rsm->r_in_tmap) {
10042 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
10043 nrsm->r_in_tmap = 1;
10045 nrsm->r_dupack = 0;
10046 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
10047 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
10048 changed += (rsm->r_end - rsm->r_start);
10049 /* You get a count for acking a whole segment or more */
10050 if ((rsm->r_end - rsm->r_start) >= segsiz)
10051 rack->r_ctl.ack_count += ((rsm->r_end - rsm->r_start) / segsiz);
10053 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
10054 if (rsm->r_in_tmap) /* should be true */
10055 rack_log_sack_passed(tp, rack, rsm);
10056 /* Is Reordering occurring? */
10057 if (rsm->r_flags & RACK_SACK_PASSED) {
10058 rsm->r_flags &= ~RACK_SACK_PASSED;
10059 rack->r_ctl.rc_reorder_ts = cts;
10060 if (rack->r_ctl.rc_reorder_ts == 0)
10061 rack->r_ctl.rc_reorder_ts = 1;
10063 if (rack->app_limited_needs_set)
10064 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
10065 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
10066 rsm->r_flags |= RACK_ACKED;
10067 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__);
10068 if (rsm->r_in_tmap) {
10069 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
10070 rsm->r_in_tmap = 0;
10073 } else if (start != end){
10075 * The block was already acked.
10077 counter_u64_add(rack_sack_skipped_acked, 1);
10082 ((rsm->r_flags & RACK_TLP) == 0) &&
10083 (rsm->r_flags & RACK_ACKED)) {
10085 * Now can we merge where we worked
10086 * with either the previous or
10089 next = tqhash_next(rack->r_ctl.tqh, rsm);
10091 if (next->r_flags & RACK_TLP)
10093 /* Only allow merges between ones in or out of GP window */
10094 if ((next->r_flags & RACK_IN_GP_WIN) &&
10095 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) {
10098 if ((rsm->r_flags & RACK_IN_GP_WIN) &&
10099 ((next->r_flags & RACK_IN_GP_WIN) == 0)) {
10102 if (rsm->bindex != next->bindex)
10104 if (rsm->r_flags & RACK_STRADDLE)
10106 if (next->r_flags & RACK_STRADDLE)
10108 if (next->r_flags & RACK_ACKED) {
10109 /* yep this and next can be merged */
10110 rsm = rack_merge_rsm(rack, rsm, next);
10112 next = tqhash_next(rack->r_ctl.tqh, rsm);
10116 /* Now what about the previous? */
10117 prev = tqhash_prev(rack->r_ctl.tqh, rsm);
10119 if (prev->r_flags & RACK_TLP)
10121 /* Only allow merges between ones in or out of GP window */
10122 if ((prev->r_flags & RACK_IN_GP_WIN) &&
10123 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) {
10126 if ((rsm->r_flags & RACK_IN_GP_WIN) &&
10127 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) {
10130 if (rsm->bindex != prev->bindex)
10132 if (rsm->r_flags & RACK_STRADDLE)
10134 if (prev->r_flags & RACK_STRADDLE)
10136 if (prev->r_flags & RACK_ACKED) {
10137 /* yep the previous and this can be merged */
10138 rsm = rack_merge_rsm(rack, prev, rsm);
10140 prev = tqhash_prev(rack->r_ctl.tqh, rsm);
10145 if (used_ref == 0) {
10146 counter_u64_add(rack_sack_proc_all, 1);
10148 counter_u64_add(rack_sack_proc_short, 1);
10150 /* Save off the next one for quick reference. */
10151 nrsm = tqhash_find(rack->r_ctl.tqh, end);
10152 *prsm = rack->r_ctl.rc_sacklast = nrsm;
10153 /* Pass back the moved. */
10154 *moved_two = moved;
10155 *no_extra = noextra;
10160 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack)
10162 struct rack_sendmap *tmap;
10165 while (rsm && (rsm->r_flags & RACK_ACKED)) {
10166 /* It's no longer sacked, mark it so */
10167 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
10169 if (rsm->r_in_tmap) {
10170 panic("rack:%p rsm:%p flags:0x%x in tmap?",
10171 rack, rsm, rsm->r_flags);
10174 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS);
10175 /* Rebuild it into our tmap */
10176 if (tmap == NULL) {
10177 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
10180 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext);
10183 tmap->r_in_tmap = 1;
10184 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
10187 * Now lets possibly clear the sack filter so we start
10188 * recognizing sacks that cover this area.
10190 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack);
10195 rack_do_decay(struct tcp_rack *rack)
10197 struct timeval res;
10199 #define timersub(tvp, uvp, vvp) \
10201 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
10202 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
10203 if ((vvp)->tv_usec < 0) { \
10205 (vvp)->tv_usec += 1000000; \
10209 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res);
10212 rack->r_ctl.input_pkt++;
10213 if ((rack->rc_in_persist) ||
10214 (res.tv_sec >= 1) ||
10215 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) {
10217 * Check for decay of non-SAD,
10218 * we want all SAD detection metrics to
10219 * decay 1/4 per second (or more) passed.
10220 * Current default is 800 so it decays
10221 * 80% every second.
10223 #ifdef TCP_SAD_DETECTION
10224 uint32_t pkt_delta;
10226 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt;
10228 /* Update our saved tracking values */
10229 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt;
10230 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
10231 /* Now do we escape without decay? */
10232 #ifdef TCP_SAD_DETECTION
10233 if (rack->rc_in_persist ||
10234 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) ||
10235 (pkt_delta < tcp_sad_low_pps)){
10237 * We don't decay idle connections
10238 * or ones that have a low input pps.
10242 /* Decay the counters */
10243 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count,
10244 tcp_sad_decay_val);
10245 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count,
10246 tcp_sad_decay_val);
10247 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra,
10248 tcp_sad_decay_val);
10249 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move,
10250 tcp_sad_decay_val);
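/*
 * Sketch of the effect above (assuming ctf_decay_count() shrinks its
 * argument by the configured per-mille factor, per the "800 ... 80%" note
 * earlier): each pass through here scales ack_count, sack_count and the
 * two move counters down geometrically, so the SAD ratios are always
 * computed over recent behaviour rather than the whole connection life.
 */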
10256 rack_rsm_sender_update(struct tcp_rack *rack, struct tcpcb *tp, struct rack_sendmap *rsm, uint8_t from)
10259 * We look at advancing the end send time for our GP
10260 * measurement tracking only as the cumulative acknowledgment
10261 * moves forward. You might wonder about this, why not
10262 * at every transmission or retransmission within the
10263 * GP window update the rc_gp_cumack_ts? Well its rather
10264 * nuanced but basically the GP window *may* expand (as
10265 * it does below) or worse and harder to track it may shrink.
10267 * This last makes it impossible to track at the time of
10268 * the send, since you may set forward your rc_gp_cumack_ts
10269 * when you send, because that send *is* in your currently
10270 * "guessed" window, but then it shrinks. Now, which was
10271 * the send time of the last bytes in the window? By the
10272 * time you ask that question, that part of the sendmap
10273 * is freed. So you don't know, and you will have too
10274 * long of a send window. Instead, by updating the time
10275 * marker only when the cumack advances this assures us
10276 * that we will have only the sends in the window of our
10279 * Another complication from this is the
10280 * merging of sendmap entries. During SACK processing this
10281 * can happen to conserve the sendmap size. That breaks
10282 * everything down in tracking the send window of the GP
10283 * estimate. So to prevent that and keep it working with
10284 * a tiny bit more limited merging, we only allow like
10285 * types to be merged. I.e. if two sends are in the GP window
10286 * then its ok to merge them together. If two sends are not
10287 * in the GP window its ok to merge them together too. Though
10288 * one send in and one send out cannot be merged. We combine
10289 * this with never allowing the shrinking of the GP window when
10290 * we are in recovery so that we can properly calculate the
10293 * This all of course seems complicated, because it is.. :)
10295 * The cum-ack is being advanced upon the sendmap.
10296 * If we are not doing a GP estimate don't
10301 if ((tp->t_flags & TF_GPUTINPROG) == 0)
10304 * If this sendmap entry is going
10305 * beyond the measurement window we had picked,
10306 * expand the measurement window by that much.
10308 if (SEQ_GT(rsm->r_end, tp->gput_ack)) {
10309 tp->gput_ack = rsm->r_end;
10312 * If we have not set up an ack, then we
10313 * have no idea if the newly acked pieces
10314 * will be "in our seq measurement range". If
10315 * it is when we clear the app_limited_needs_set
10316 * flag the timestamp will be updated.
10318 if (rack->app_limited_needs_set)
10321 * Finally, we grab out the latest timestamp
10322 * that this packet was sent and then see
10324 * a) The packet touches our newly defined GP range.
10325 * b) The time is greater than (newer) than the
10326 * one we currently have. If so we update
10327 * our sending end time window.
10329 * Note we *do not* do this at send time. The reason
10330 * is that if you do you *may* pick up a newer timestamp
10331 * for a range you are not going to measure. We project
10332 * out how far and then sometimes modify that to be
10333 * smaller. If that occurs then you will have a send
10334 * that does not belong to the range included.
10336 if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <=
10337 rack->r_ctl.rc_gp_cumack_ts)
10339 if (rack_in_gp_window(tp, rsm)) {
10340 rack->r_ctl.rc_gp_cumack_ts = ts;
10341 rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end,
10342 __LINE__, from, rsm);
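/*
 * Condensed restatement of the rule implemented above: rc_gp_cumack_ts is
 * only ever advanced (never rewound), only as the cum-ack moves forward,
 * and only for an rsm that lies inside the current goodput window; we keep
 * the newest r_tim_lastsent[] of such rsm's so the measurement ends at the
 * send time of the last bytes actually inside the window.
 */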
10347 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to, uint64_t acktime)
10349 struct rack_sendmap *rsm;
10351 * The ACK point is advancing to th_ack, we must drop off
10352 * the packets in the rack log and calculate any eligible
10356 rack->r_wanted_output = 1;
10357 if (SEQ_GT(th_ack, tp->snd_una))
10358 rack->r_ctl.last_cumack_advance = acktime;
10360 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */
10361 if ((rack->rc_last_tlp_acked_set == 1)&&
10362 (rack->rc_last_tlp_past_cumack == 1) &&
10363 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) {
10365 * We have reached the point where our last rack
10366 * tlp retransmit sequence is ahead of the cum-ack.
10367 * This can only happen when the cum-ack moves all
10368 * the way around (it's been a full 2^31+1 bytes
10369 * or more since we sent a retransmitted TLP). Lets
10370 * turn off the valid flag since its not really valid.
10372 * Note since sack's also turn on this event we have
10373 * a complication, we have to wait to age it out until
10374 * the cum-ack is by the TLP before checking which is
10375 * what the next else clause does.
10377 rack_log_dsack_event(rack, 9, __LINE__,
10378 rack->r_ctl.last_tlp_acked_start,
10379 rack->r_ctl.last_tlp_acked_end);
10380 rack->rc_last_tlp_acked_set = 0;
10381 rack->rc_last_tlp_past_cumack = 0;
10382 } else if ((rack->rc_last_tlp_acked_set == 1) &&
10383 (rack->rc_last_tlp_past_cumack == 0) &&
10384 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) {
10386 * It is safe to start aging TLP's out.
10388 rack->rc_last_tlp_past_cumack = 1;
10390 /* We do the same for the tlp send seq as well */
10391 if ((rack->rc_last_sent_tlp_seq_valid == 1) &&
10392 (rack->rc_last_sent_tlp_past_cumack == 1) &&
10393 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) {
10394 rack_log_dsack_event(rack, 9, __LINE__,
10395 rack->r_ctl.last_sent_tlp_seq,
10396 (rack->r_ctl.last_sent_tlp_seq +
10397 rack->r_ctl.last_sent_tlp_len));
10398 rack->rc_last_sent_tlp_seq_valid = 0;
10399 rack->rc_last_sent_tlp_past_cumack = 0;
10400 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) &&
10401 (rack->rc_last_sent_tlp_past_cumack == 0) &&
10402 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) {
10404 * It is safe to start aging TLP's send.
10406 rack->rc_last_sent_tlp_past_cumack = 1;
10409 rsm = tqhash_min(rack->r_ctl.tqh);
10411 if ((th_ack - 1) == tp->iss) {
10413 * For the SYN incoming case we will not
10414 * have called tcp_output for the sending of
10415 * the SYN, so there will be no map. All
10416 * other cases should probably be a panic.
10420 if (tp->t_flags & TF_SENTFIN) {
10421 /* if we sent a FIN we often will not have map */
10425 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n",
10427 tp->t_state, th_ack, rack,
10428 tp->snd_una, tp->snd_max, tp->snd_nxt);
10432 if (SEQ_LT(th_ack, rsm->r_start)) {
10433 /* Huh map is missing this */
10435 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n",
10437 th_ack, tp->t_state, rack->r_state);
10441 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack);
10443 /* Now was it a retransmitted TLP? */
10444 if ((rsm->r_flags & RACK_TLP) &&
10445 (rsm->r_rtr_cnt > 1)) {
10447 * Yes, this rsm was a TLP and retransmitted, remember that
10448 * since if a DSACK comes back on this we don't want
10449 * to think of it as a reordered segment. This may
10450 * get updated again with possibly even other TLPs
10451 * in flight, but thats ok. Only when we don't send
10452 * a retransmitted TLP for 1/2 the sequences space
10453 * will it get turned off (above).
10455 if (rack->rc_last_tlp_acked_set &&
10456 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
10458 * We already turned this on since the end matches,
10459 * the previous one was partially acked, now we
10460 * are getting another one (maybe all of it).
10462 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
10464 * Lets make sure we have all of it though.
10466 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
10467 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
10468 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
10469 rack->r_ctl.last_tlp_acked_end);
10471 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
10472 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
10473 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
10474 rack->r_ctl.last_tlp_acked_end);
10477 rack->rc_last_tlp_past_cumack = 1;
10478 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
10479 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
10480 rack->rc_last_tlp_acked_set = 1;
10481 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
10484 /* Now do we consume the whole thing? */
10485 if (SEQ_GEQ(th_ack, rsm->r_end)) {
10486 /* Its all consumed. */
10488 uint8_t newly_acked;
10490 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__);
10491 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
10492 rsm->r_rtr_bytes = 0;
10494 * Record the time of highest cumack sent if its in our measurement
10495 * window and possibly bump out the end.
10497 rack_rsm_sender_update(rack, tp, rsm, 4);
10498 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK);
10499 if (rsm->r_in_tmap) {
10500 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
10501 rsm->r_in_tmap = 0;
10504 if (rsm->r_flags & RACK_ACKED) {
10506 * It was acked on the scoreboard -- remove
10509 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
10511 } else if (rsm->r_flags & RACK_SACK_PASSED) {
10513 * There are segments ACKED on the
10514 * scoreboard further up. We are seeing
10517 rsm->r_flags &= ~RACK_SACK_PASSED;
10518 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
10519 rsm->r_flags |= RACK_ACKED;
10520 rack->r_ctl.rc_reorder_ts = cts;
10521 if (rack->r_ctl.rc_reorder_ts == 0)
10522 rack->r_ctl.rc_reorder_ts = 1;
10523 if (rack->r_ent_rec_ns) {
10525 * We have sent no more, and we saw a sack
10528 rack->r_might_revert = 1;
10531 if ((rsm->r_flags & RACK_TO_REXT) &&
10532 (tp->t_flags & TF_RCVD_TSTMP) &&
10533 (to->to_flags & TOF_TS) &&
10534 (to->to_tsecr != 0) &&
10535 (tp->t_flags & TF_PREVVALID)) {
10537 * We can use the timestamp to see
10538 * if this retransmission was from the
10539 * first transmit. If so we made a mistake.
10541 tp->t_flags &= ~TF_PREVVALID;
10542 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) {
10543 /* The first transmit is what this ack is for */
10544 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__);
10547 left = th_ack - rsm->r_end;
10548 if (rack->app_limited_needs_set && newly_acked)
10549 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK);
10550 /* Free back to zone */
10551 rack_free(rack, rsm);
10555 /* Check for reneging */
10556 rsm = tqhash_min(rack->r_ctl.tqh);
10557 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) {
10559 * The peer has moved snd_una up to
10560 * the edge of this send, i.e. one
10561 * that it had previously acked. The only
10562 * way that can be true if the peer threw
10563 * away data (space issues) that it had
10564 * previously sacked (else it would have
10565 * given us snd_una up to (rsm->r_end).
10566 * We need to undo the acked markings here.
10568 * Note we have to look to make sure th_ack is
10569 * our rsm->r_start in case we get an old ack
10570 * where th_ack is behind snd_una.
10572 rack_peer_reneges(rack, rsm, th_ack);
10576 if (rsm->r_flags & RACK_ACKED) {
10578 * It was acked on the scoreboard -- remove it from
10579 * total for the part being cum-acked.
10581 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
10584 * Clear the dup ack count for
10585 * the piece that remains.
10588 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
10589 if (rsm->r_rtr_bytes) {
10591 * It was retransmitted adjust the
10592 * sack holes for what was acked.
10596 ack_am = (th_ack - rsm->r_start);
10597 if (ack_am >= rsm->r_rtr_bytes) {
10598 rack->r_ctl.rc_holes_rxt -= ack_am;
10599 rsm->r_rtr_bytes -= ack_am;
10603 * Update where the piece starts and record
10604 * the time of send of highest cumack sent if
10605 * its in our GP range.
10607 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__);
10608 /* Now we need to move our offset forward too */
10610 ((rsm->orig_m_len != rsm->m->m_len) ||
10611 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) {
10612 /* Fix up the orig_m_len and possibly the mbuf offset */
10613 rack_adjust_orig_mlen(rsm);
10615 rsm->soff += (th_ack - rsm->r_start);
10616 rack_rsm_sender_update(rack, tp, rsm, 5);
10617 /* The trim will move th_ack into r_start for us */
10618 tqhash_trim(rack->r_ctl.tqh, th_ack);
10619 /* Now do we need to move the mbuf fwd too? */
10621 while (rsm->soff >= rsm->m->m_len) {
10622 rsm->soff -= rsm->m->m_len;
10623 rsm->m = rsm->m->m_next;
10624 KASSERT((rsm->m != NULL),
10625 (" nrsm:%p hit at soff:%u null m",
10628 rsm->orig_m_len = rsm->m->m_len;
10629 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
10631 if (rack->app_limited_needs_set &&
10632 SEQ_GEQ(th_ack, tp->gput_seq))
10633 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG);
10637 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack)
10639 struct rack_sendmap *rsm;
10640 int sack_pass_fnd = 0;
10642 if (rack->r_might_revert) {
10644 * Ok we have reordering, have not sent anything, we
10645 * might want to revert the congestion state if nothing
10646 * further has SACK_PASSED on it. Lets check.
10648 * We also get here when we have DSACKs come in for
10649 * all the data that we FR'd. Note that a rxt or tlp
10650 * timer clears this from happening.
10653 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
10654 if (rsm->r_flags & RACK_SACK_PASSED) {
10659 if (sack_pass_fnd == 0) {
10661 * We went into recovery
10662 * incorrectly due to reordering!
10666 rack->r_ent_rec_ns = 0;
10667 orig_cwnd = tp->snd_cwnd;
10668 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec;
10669 tp->snd_recover = tp->snd_una;
10670 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__);
10671 EXIT_RECOVERY(tp->t_flags);
10673 rack->r_might_revert = 0;
10677 #ifdef TCP_SAD_DETECTION
10680 rack_merge_out_sacks(struct tcp_rack *rack)
10682 struct rack_sendmap *cur, *next, *rsm, *trsm = NULL;
10684 cur = tqhash_min(rack->r_ctl.tqh);
10686 next = tqhash_next(rack->r_ctl.tqh, cur);
10688 * The idea is to go through all and merge back
10689 * together the pieces sent together,
10691 if ((next != NULL) &&
10692 (cur->r_tim_lastsent[0] == next->r_tim_lastsent[0])) {
10693 rack_merge_rsm(rack, cur, next);
10699 * now treat it like a rxt event, everything is outstanding
10700 * and sent, nothing acked, and dupacks are all zero. If this
10701 * is not an attacker it will have to dupack its way through
10704 TAILQ_INIT(&rack->r_ctl.rc_tmap);
10705 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) {
10707 /* We must re-add it back to the tlist */
10708 if (trsm == NULL) {
10709 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
10711 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
10713 rsm->r_in_tmap = 1;
10715 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED);
10717 sack_filter_clear(&rack->r_ctl.rack_sf, rack->rc_tp->snd_una);
10721 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz)
10723 int do_detection = 0;
10725 if (rack->sack_attack_disable || rack->rc_suspicious) {
10727 * If we have been disabled we must detect
10728 * to possibly reverse it. Or if the guy has
10729 * sent in suspicious sacks we want to do detection too.
10733 } else if ((rack->do_detection || tcp_force_detection) &&
10734 (tcp_sack_to_ack_thresh > 0) &&
10735 (tcp_sack_to_move_thresh > 0) &&
10736 (rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum)) {
10738 * We only detect here if:
10739 * 1) System wide forcing is on <or> do_detection is on
10741 * 2) We have thresholds for move and ack (set one to 0 and we are off)
10743 * 3) We have maps allocated larger than our min (500).
10747 if (do_detection > 0) {
10749 * We have thresholds set to find
10750 * possible attackers and disable sack.
10753 uint64_t ackratio, moveratio, movetotal;
10755 /* Log detecting */
10756 rack_log_sad(rack, 1);
10757 /* Do we establish an ack ratio */
10758 if ((rack->r_ctl.sack_count > tcp_map_minimum) ||
10759 (rack->rc_suspicious == 1) ||
10760 (rack->sack_attack_disable > 0)) {
10761 ackratio = (uint64_t)(rack->r_ctl.sack_count);
10762 ackratio *= (uint64_t)(1000);
10763 if (rack->r_ctl.ack_count)
10764 ackratio /= (uint64_t)(rack->r_ctl.ack_count);
10766 /* We can hit this due to ack totals degradation (via small sacks) */
10771 * No ack ratio needed if we have not
10772 * seen more sacks than the number of map entries.
10773 * The exception to that is if we have disabled sack, then
10774 * we need to find a ratio.
10779 if ((rack->sack_attack_disable == 0) &&
10780 (ackratio > rack_highest_sack_thresh_seen))
10781 rack_highest_sack_thresh_seen = (uint32_t)ackratio;
10782 /* Do we establish a move ratio? */
10783 if ((rack->r_ctl.sack_moved_extra > tcp_map_minimum) ||
10784 (rack->rc_suspicious == 1) ||
10785 (rack->sack_attack_disable > 0)) {
10787 * We need to have more sack moves than maps
10788 * allocated to have a move ratio considered.
10790 movetotal = rack->r_ctl.sack_moved_extra;
10791 movetotal += rack->r_ctl.sack_noextra_move;
10792 moveratio = rack->r_ctl.sack_moved_extra;
10793 moveratio *= (uint64_t)1000;
10795 moveratio /= movetotal;
10797 /* No moves, that's pretty good */
10802 * Not enough moves have occurred to consider
10803 * if we are out of whack in that ratio.
10804 * The exception to that is if we have disabled sack, then
10805 * we need to find a ratio.
10809 if ((rack->sack_attack_disable == 0) &&
10810 (moveratio > rack_highest_move_thresh_seen))
10811 rack_highest_move_thresh_seen = (uint32_t)moveratio;
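/*
 * A worked example of the two ratios above (numbers purely
 * illustrative): both are scaled by 1000, so 600 SACK blocks seen
 * against 1000 cum-acks gives an ackratio of 600, and 700 "extra"
 * map moves out of 1000 total moves gives a moveratio of 700.  Only
 * when both ratios exceed their respective sysctl thresholds in the
 * tests below do we treat the peer as a possible SACK attacker and
 * disable sack processing.
 */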
10812 /* Now the tests */
10813 if (rack->sack_attack_disable == 0) {
10814 /* Not disabled, do we need to disable? */
10815 if ((ackratio > tcp_sack_to_ack_thresh) &&
10816 (moveratio > tcp_sack_to_move_thresh)) {
10817 /* Disable sack processing */
10818 tcp_trace_point(rack->rc_tp, TCP_TP_SAD_TRIGGERED);
10819 rack->sack_attack_disable = 1;
10820 /* set it so we have the built in delay */
10821 rack->r_ctl.ack_during_sd = 1;
10822 if (rack_merge_out_sacks_on_attack)
10823 rack_merge_out_sacks(rack);
10824 counter_u64_add(rack_sack_attacks_detected, 1);
10825 tcp_trace_point(rack->rc_tp, TCP_TP_SAD_TRIGGERED);
10826 /* Clamp the cwnd at flight size */
10827 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd;
10828 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
10829 rack_log_sad(rack, 2);
10832 /* We are sack-disabled, check for false positives */
10833 if ((ackratio <= tcp_restoral_thresh) ||
10834 ((rack_merge_out_sacks_on_attack == 0) &&
10835 (rack->rc_suspicious == 0) &&
10836 (rack->r_ctl.rc_num_maps_alloced <= (tcp_map_minimum/2)))) {
10837 rack->sack_attack_disable = 0;
10838 rack_log_sad(rack, 3);
10839 /* Restart counting */
10840 rack->r_ctl.sack_count = 0;
10841 rack->r_ctl.sack_moved_extra = 0;
10842 rack->r_ctl.sack_noextra_move = 1;
10843 rack->rc_suspicious = 0;
10844 rack->r_ctl.ack_count = max(1,
10845 (bytes_this_ack / segsiz));
10847 counter_u64_add(rack_sack_attacks_reversed, 1);
10848 /* Restore the cwnd */
10849 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd)
10850 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd;
10858 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end)
10861 uint32_t am, l_end;
10864 if (SEQ_GT(end, start))
10868 if ((rack->rc_last_tlp_acked_set ) &&
10869 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) &&
10870 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) {
10872 * The DSACK is because of a TLP, so we don't
10873 * adjust the reordering window for it, since
10874 * it was not reordering that caused the DSACK but
10875 * our previous retransmit TLP.
10877 rack_log_dsack_event(rack, 7, __LINE__, start, end);
10879 goto skip_dsack_round;
10881 if (rack->rc_last_sent_tlp_seq_valid) {
10882 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len;
10883 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) &&
10884 (SEQ_LEQ(end, l_end))) {
10886 * This dsack is from the last sent TLP, ignore it
10887 * for reordering purposes.
10889 rack_log_dsack_event(rack, 7, __LINE__, start, end);
10891 goto skip_dsack_round;
10894 if (rack->rc_dsack_round_seen == 0) {
10895 rack->rc_dsack_round_seen = 1;
10896 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max;
10897 rack->r_ctl.num_dsack++;
10898 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */
10899 rack_log_dsack_event(rack, 2, __LINE__, 0, 0);
10903 * We keep track of how many DSACK blocks we get
10904 * after a recovery incident.
10906 rack->r_ctl.dsack_byte_cnt += am;
10907 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) &&
10908 rack->r_ctl.retran_during_recovery &&
10909 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) {
10911 * False recovery; the most likely culprit is reordering. If
10912 * nothing else is missing we need to revert.
10914 rack->r_might_revert = 1;
10915 rack_handle_might_revert(rack->rc_tp, rack);
10916 rack->r_might_revert = 0;
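/*
 * In other words, once the DSACKed byte count reaches the number of
 * bytes we retransmitted during recovery, every retransmission has
 * been reported back as a duplicate, so the recovery is treated as
 * spurious and reverted via rack_handle_might_revert() above.
 */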
10917 rack->r_ctl.retran_during_recovery = 0;
10918 rack->r_ctl.dsack_byte_cnt = 0;
10924 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una)
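/*
 * This is an RFC 6675 style pipe estimate of the data still in the
 * network: bytes outstanding, minus what the peer has SACKed, plus
 * any holes we have already retransmitted.  Rough example (numbers
 * illustrative): 100000 bytes outstanding, 30000 SACKed and 10000
 * bytes of retransmitted holes yields a pipe of 80000 bytes.
 */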
10926 return (((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt);
10930 rack_compute_pipe(struct tcpcb *tp)
10932 return ((int32_t)do_rack_compute_pipe(tp,
10933 (struct tcp_rack *)tp->t_fb_ptr,
10938 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack)
10940 /* Deal with changed and PRR here (in recovery only) */
10941 uint32_t pipe, snd_una;
10943 rack->r_ctl.rc_prr_delivered += changed;
10945 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) {
10947 * It is all outstanding, we are application limited
10948 * and thus we don't need more room to send anything.
10949 * Note we use tp->snd_una here and not th_ack because
10950 * the data has not yet been cut from the sb.
10952 rack->r_ctl.rc_prr_sndcnt = 0;
10955 /* Compute prr_sndcnt */
10956 if (SEQ_GT(tp->snd_una, th_ack)) {
10957 snd_una = tp->snd_una;
10961 pipe = do_rack_compute_pipe(tp, rack, snd_una);
10962 if (pipe > tp->snd_ssthresh) {
10965 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh;
10966 if (rack->r_ctl.rc_prr_recovery_fs > 0)
10967 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs;
10969 rack->r_ctl.rc_prr_sndcnt = 0;
10970 rack_log_to_prr(rack, 9, 0, __LINE__);
10974 if (sndcnt > (long)rack->r_ctl.rc_prr_out)
10975 sndcnt -= rack->r_ctl.rc_prr_out;
10978 rack->r_ctl.rc_prr_sndcnt = sndcnt;
10979 rack_log_to_prr(rack, 10, 0, __LINE__);
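/*
 * The math above roughly follows Proportional Rate Reduction
 * (RFC 6937): sndcnt = (prr_delivered * ssthresh) / RecoverFS,
 * less what has already been sent in recovery (prr_out).  Rough
 * example in segment units (numbers illustrative): ssthresh of 10,
 * RecoverFS of 20, 4 segments newly delivered and 1 already sent
 * gives 4 * 10 / 20 - 1 = 1 more segment allowed out.
 */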
10983 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out)
10984 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out);
10987 if (changed > limit)
10989 limit += ctf_fixed_maxseg(tp);
10990 if (tp->snd_ssthresh > pipe) {
10991 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit);
10992 rack_log_to_prr(rack, 11, 0, __LINE__);
10994 rack->r_ctl.rc_prr_sndcnt = min(0, limit);
10995 rack_log_to_prr(rack, 12, 0, __LINE__);
11001 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck,
11002 int *dsack_seen, int *sacks_seen)
11005 struct tcp_rack *rack;
11006 struct rack_sendmap *rsm;
11007 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1];
11008 register uint32_t th_ack;
11009 int32_t i, j, k, num_sack_blks = 0;
11010 uint32_t cts, acked, ack_point;
11011 int loop_start = 0, moved_two = 0, no_extra = 0;
11013 uint32_t segsiz, o_cnt;
11016 INP_WLOCK_ASSERT(tptoinpcb(tp));
11017 if (tcp_get_flags(th) & TH_RST) {
11018 /* We don't log resets */
11021 rack = (struct tcp_rack *)tp->t_fb_ptr;
11022 cts = tcp_get_usecs(NULL);
11023 rsm = tqhash_min(rack->r_ctl.tqh);
11025 th_ack = th->th_ack;
11026 if (rack->sack_attack_disable == 0)
11027 rack_do_decay(rack);
11028 segsiz = ctf_fixed_maxseg(rack->rc_tp);
11029 if (BYTES_THIS_ACK(tp, th) >= segsiz) {
11031 * You only get credit for
11032 * MSS and greater (and you get extra
11033 * credit for larger cum-ack moves).
11037 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp);
11038 rack->r_ctl.ack_count += ac;
11039 counter_u64_add(rack_ack_total, ac);
11041 if (rack->r_ctl.ack_count > 0xfff00000) {
11043 * reduce the number to keep us under
11046 rack->r_ctl.ack_count /= 2;
11047 rack->r_ctl.sack_count /= 2;
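/*
 * Halving both counters together keeps them comfortably below
 * wrap-around while preserving the sack-to-ack ratio that the
 * detection logic relies on.
 */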
11049 if (SEQ_GT(th_ack, tp->snd_una)) {
11050 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__);
11051 tp->t_acktime = ticks;
11053 if (rsm && SEQ_GT(th_ack, rsm->r_start))
11054 changed = th_ack - rsm->r_start;
11056 rack_process_to_cumack(tp, rack, th_ack, cts, to,
11057 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time));
11059 if ((to->to_flags & TOF_SACK) == 0) {
11060 /* We are done, nothing left and no sack. */
11061 rack_handle_might_revert(tp, rack);
11063 * For cases where we struck a dup-ack
11064 * with no SACK, add to the changes so
11065 * PRR will work right.
11067 if (dup_ack_struck && (changed == 0)) {
11068 changed += ctf_fixed_maxseg(rack->rc_tp);
11072 /* Sack block processing */
11073 if (SEQ_GT(th_ack, tp->snd_una))
11074 ack_point = th_ack;
11076 ack_point = tp->snd_una;
11077 for (i = 0; i < to->to_nsacks; i++) {
11078 bcopy((to->to_sacks + i * TCPOLEN_SACK),
11079 &sack, sizeof(sack));
11080 sack.start = ntohl(sack.start);
11081 sack.end = ntohl(sack.end);
11082 if (SEQ_GT(sack.end, sack.start) &&
11083 SEQ_GT(sack.start, ack_point) &&
11084 SEQ_LT(sack.start, tp->snd_max) &&
11085 SEQ_GT(sack.end, ack_point) &&
11086 SEQ_LEQ(sack.end, tp->snd_max)) {
11087 sack_blocks[num_sack_blks] = sack;
11089 } else if (SEQ_LEQ(sack.start, th_ack) &&
11090 SEQ_LEQ(sack.end, th_ack)) {
11093 if (dsack_seen != NULL)
11095 was_tlp = rack_note_dsack(rack, sack.start, sack.end);
11097 * It's a D-SACK block.
11099 tcp_record_dsack(tp, sack.start, sack.end, was_tlp);
11102 if (rack->rc_dsack_round_seen) {
11103 /* Is the dsack round over? */
11104 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) {
11106 rack->rc_dsack_round_seen = 0;
11107 rack_log_dsack_event(rack, 3, __LINE__, 0, 0);
11111 * Sort the SACK blocks so we can update the rack scoreboard with
11114 o_cnt = num_sack_blks;
11115 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks,
11116 num_sack_blks, th->th_ack);
11117 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks);
11118 if (sacks_seen != NULL)
11119 *sacks_seen = num_sack_blks;
11120 if (num_sack_blks == 0) {
11121 /* Nothing to sack, but we need to update counts */
11122 if ((o_cnt == 1) &&
11123 (*dsack_seen != 1))
11124 rack->r_ctl.sack_count++;
11125 else if (o_cnt > 1)
11126 rack->r_ctl.sack_count++;
11127 goto out_with_totals;
11129 if (rack->sack_attack_disable) {
11131 * An attacker disablement is in place, for
11132 * every sack block that is not at least a full MSS
11133 * count up sack_count.
11135 for (i = 0; i < num_sack_blks; i++) {
11136 if ((sack_blocks[i].end - sack_blocks[i].start) < segsiz) {
11137 rack->r_ctl.sack_count++;
11139 if (rack->r_ctl.sack_count > 0xfff00000) {
11141 * reduce the number to keep us under
11144 rack->r_ctl.ack_count /= 2;
11145 rack->r_ctl.sack_count /= 2;
11150 /* It's a sack of some sort */
11151 rack->r_ctl.sack_count += num_sack_blks;
11152 if (rack->r_ctl.sack_count > 0xfff00000) {
11154 * reduce the number to keep us under
11157 rack->r_ctl.ack_count /= 2;
11158 rack->r_ctl.sack_count /= 2;
11160 if (num_sack_blks < 2) {
11161 /* Only one, we don't need to sort */
11164 /* Sort the sacks */
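/*
 * num_sack_blks is bounded by TCP_MAX_SACK + 1 entries, so the
 * simple O(n^2) exchange sort below (ordering blocks by their end
 * sequence) is effectively constant cost.
 */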
11165 for (i = 0; i < num_sack_blks; i++) {
11166 for (j = i + 1; j < num_sack_blks; j++) {
11167 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
11168 sack = sack_blocks[i];
11169 sack_blocks[i] = sack_blocks[j];
11170 sack_blocks[j] = sack;
11175 * Now are any of the sack block ends the same (yes some
11176 * implementations send these)?
11179 if (num_sack_blks == 0)
11180 goto out_with_totals;
11181 if (num_sack_blks > 1) {
11182 for (i = 0; i < num_sack_blks; i++) {
11183 for (j = i + 1; j < num_sack_blks; j++) {
11184 if (sack_blocks[i].end == sack_blocks[j].end) {
11186 * Ok these two have the same end, we
11187 * want the smallest end and then
11188 * throw away the larger and start
11191 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) {
11193 * The second block covers
11194 * more area, use that
11196 sack_blocks[i].start = sack_blocks[j].start;
11199 * Now collapse out the dup-sack and
11202 for (k = (j + 1); k < num_sack_blks; k++) {
11203 sack_blocks[j].start = sack_blocks[k].start;
11204 sack_blocks[j].end = sack_blocks[k].end;
11215 * First let's look to see if
11216 * we have retransmitted and
11217 * can use the transmit next?
11219 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
11221 SEQ_GT(sack_blocks[0].end, rsm->r_start) &&
11222 SEQ_LT(sack_blocks[0].start, rsm->r_end)) {
11224 * We probably did the FR and the next
11225 * SACK coming in continues as we would expect.
11227 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &no_extra, &moved_two, segsiz);
11229 rack->r_wanted_output = 1;
11232 if (num_sack_blks == 1) {
11234 * This is what we would expect from
11235 * a normal implementation to happen
11236 * after we have retransmitted the FR,
11237 * i.e. the sack-filter pushes down
11238 * to 1 block and the next to be retransmitted
11239 * is the sequence in the sack block (as more
11240 * are acked). Count this as ACK'd data to boost
11241 * up the chances of recovering any false positives.
11243 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp));
11244 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp)));
11245 counter_u64_add(rack_express_sack, 1);
11246 if (rack->r_ctl.ack_count > 0xfff00000) {
11248 * reduce the number to keep us under
11251 rack->r_ctl.ack_count /= 2;
11252 rack->r_ctl.sack_count /= 2;
11256 * If we did not get a SACK for at least a MSS and
11257 * had to move at all, or if we moved more than our
11258 * threshold, it counts against the "extra" move.
11260 rack->r_ctl.sack_moved_extra += moved_two;
11261 rack->r_ctl.sack_noextra_move += no_extra;
11262 counter_u64_add(rack_move_some, 1);
11265 * else we did not have to move
11266 * any more than we would expect.
11268 rack->r_ctl.sack_noextra_move += no_extra;
11269 rack->r_ctl.sack_noextra_move++;
11270 counter_u64_add(rack_move_none, 1);
11272 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) ||
11273 (rack->r_ctl.sack_noextra_move > 0xfff00000)) {
11274 rack->r_ctl.sack_moved_extra /= 2;
11275 rack->r_ctl.sack_noextra_move /= 2;
11277 goto out_with_totals;
11280 * Start the loop through the
11281 * rest of blocks, past the first block.
11286 counter_u64_add(rack_sack_total, 1);
11287 rsm = rack->r_ctl.rc_sacklast;
11288 for (i = loop_start; i < num_sack_blks; i++) {
11289 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &no_extra, &moved_two, segsiz);
11291 rack->r_wanted_output = 1;
11296 * If we did not get a SACK for at least a MSS and
11297 * had to move at all, or if we moved more than our
11298 * threshold, it counts against the "extra" move.
11300 rack->r_ctl.sack_moved_extra += moved_two;
11301 rack->r_ctl.sack_noextra_move += no_extra;
11302 counter_u64_add(rack_move_some, 1);
11305 * else we did not have to move
11306 * any more than we would expect.
11308 rack->r_ctl.sack_noextra_move += no_extra;
11309 rack->r_ctl.sack_noextra_move++;
11310 counter_u64_add(rack_move_none, 1);
11312 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) ||
11313 (rack->r_ctl.sack_noextra_move > 0xfff00000)) {
11314 rack->r_ctl.sack_moved_extra /= 2;
11315 rack->r_ctl.sack_noextra_move /= 2;
11317 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) {
11319 * If the SACK was not a full MSS then
11320 * we add to sack_count the number of
11321 * MSS's (or possibly more than
11322 * a MSS if it's a TSO send) we had to skip by.
11324 rack->r_ctl.sack_count += moved_two;
11325 if (rack->r_ctl.sack_count > 0xfff00000) {
11326 rack->r_ctl.ack_count /= 2;
11327 rack->r_ctl.sack_count /= 2;
11329 counter_u64_add(rack_sack_total, moved_two);
11332 * Now we need to setup for the next
11333 * round. First we make sure we won't
11334 * exceed the size of our uint32_t on
11335 * the various counts, and then clear out
11342 if (num_sack_blks > 1) {
11344 * You get an extra stroke if
11345 * you have more than one sack-blk, this
11346 * could be where we are skipping forward
11347 * and the sack-filter is still working, or
11348 * it could be an attacker constantly
11351 rack->r_ctl.sack_moved_extra++;
11352 counter_u64_add(rack_move_some, 1);
11355 #ifdef TCP_SAD_DETECTION
11356 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp));
11359 /* Something changed, cancel the rack timer */
11360 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
11362 tsused = tcp_get_usecs(NULL);
11363 rsm = tcp_rack_output(tp, rack, tsused);
11364 if ((!IN_FASTRECOVERY(tp->t_flags)) &&
11366 ((rsm->r_flags & RACK_MUST_RXT) == 0)) {
11367 /* Enter recovery */
11368 entered_recovery = 1;
11369 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
11371 * When we enter recovery we need to assure we send
11374 if (rack->rack_no_prr == 0) {
11375 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
11376 rack_log_to_prr(rack, 8, 0, __LINE__);
11378 rack->r_timer_override = 1;
11380 rack->r_ctl.rc_agg_early = 0;
11381 } else if (IN_FASTRECOVERY(tp->t_flags) &&
11383 (rack->r_rr_config == 3)) {
11385 * Assure we can output and we get no
11386 * remembered pace time except the retransmit.
11388 rack->r_timer_override = 1;
11389 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
11390 rack->r_ctl.rc_resend = rsm;
11392 if (IN_FASTRECOVERY(tp->t_flags) &&
11393 (rack->rack_no_prr == 0) &&
11394 (entered_recovery == 0)) {
11395 rack_update_prr(tp, rack, changed, th_ack);
11396 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) &&
11397 ((tcp_in_hpts(rack->rc_inp) == 0) &&
11398 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) {
11400 * If you are pacing output you don't want
11404 rack->r_ctl.rc_agg_early = 0;
11405 rack->r_timer_override = 1;
11411 rack_strike_dupack(struct tcp_rack *rack)
11413 struct rack_sendmap *rsm;
11415 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
11418 * We need to skip anything already set
11419 * to be retransmitted.
11421 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
11422 (rsm->r_flags & RACK_MUST_RXT)) {
11423 rsm = TAILQ_NEXT(rsm, r_tnext);
11428 if (rsm && (rsm->r_dupack < 0xff)) {
11430 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) {
11434 * Here we see if we need to retransmit. For
11435 * a SACK type connection if enough time has passed
11436 * we will get a return of the rsm. For a non-sack
11437 * connection we will get the rsm returned if the
11438 * dupack value is 3 or more.
11440 cts = tcp_get_usecs(&tv);
11441 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts);
11442 if (rack->r_ctl.rc_resend != NULL) {
11443 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) {
11444 rack_cong_signal(rack->rc_tp, CC_NDUPACK,
11445 rack->rc_tp->snd_una, __LINE__);
11447 rack->r_wanted_output = 1;
11448 rack->r_timer_override = 1;
11449 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3);
11452 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3);
11458 rack_check_bottom_drag(struct tcpcb *tp,
11459 struct tcp_rack *rack,
11462 uint32_t segsiz, minseg;
11464 segsiz = ctf_fixed_maxseg(tp);
11466 if (tp->snd_max == tp->snd_una) {
11468 * We are doing dynamic pacing and we are way
11469 * under. Basically everything got acked while
11470 * we were still waiting on the pacer to expire.
11472 * This means we need to boost the b/w in
11473 * addition to any earlier boosting of
11478 lt_bw = rack_get_lt_bw(rack);
11479 rack->rc_dragged_bottom = 1;
11480 rack_validate_multipliers_at_or_above100(rack);
11481 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) &&
11484 * Let's use the long-term b/w we have
11485 * been getting as a base.
11487 if (rack->rc_gp_filled == 0) {
11488 if (lt_bw > ONE_POINT_TWO_MEG) {
11490 * If we have no measurement
11491 * don't let us set in more than
11492 * 1.2Mbps. If we are still too
11493 * low after pacing with this we
11494 * will hopefully have a max b/w
11495 * available to sanity check things.
11497 lt_bw = ONE_POINT_TWO_MEG;
11499 rack->r_ctl.rc_rtt_diff = 0;
11500 rack->r_ctl.gp_bw = lt_bw;
11501 rack->rc_gp_filled = 1;
11502 if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
11503 rack->r_ctl.num_measurements = RACK_REQ_AVG;
11504 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
11505 } else if (lt_bw > rack->r_ctl.gp_bw) {
11506 rack->r_ctl.rc_rtt_diff = 0;
11507 if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
11508 rack->r_ctl.num_measurements = RACK_REQ_AVG;
11509 rack->r_ctl.gp_bw = lt_bw;
11510 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
11512 rack_increase_bw_mul(rack, -1, 0, 0, 1);
11513 if ((rack->gp_ready == 0) &&
11514 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
11515 /* We have enough measurements now */
11516 rack->gp_ready = 1;
11517 if ((rack->rc_always_pace && (rack->use_fixed_rate == 0)) ||
11519 rack_set_cc_pacing(rack);
11520 if (rack->defer_options)
11521 rack_apply_deferred_options(rack);
11525 * zero rtt possibly? Settle for just an old increase.
11527 rack_increase_bw_mul(rack, -1, 0, 0, 1);
11529 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
11530 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)),
11532 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) &&
11533 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) &&
11534 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <=
11535 (segsiz * rack_req_segs))) {
11537 * We are doing dynamic GP pacing and
11538 * we have everything except 1 MSS or less
11539 * bytes left out. We are still pacing away.
11540 * And there is data that could be sent. This
11541 * means we are inserting delayed ack time in
11542 * our measurements because we are pacing too slowly.
11544 rack_validate_multipliers_at_or_above100(rack);
11545 rack->rc_dragged_bottom = 1;
11546 rack_increase_bw_mul(rack, -1, 0, 0, 1);
11550 #ifdef TCP_REQUEST_TRK
11552 rack_log_hybrid(struct tcp_rack *rack, uint32_t seq,
11553 struct http_sendfile_track *cur, uint8_t mod, int line, int err)
11557 do_log = tcp_bblogging_on(rack->rc_tp);
11559 if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) )== 0)
11561 /* We only allow the three below with point logging on */
11562 if ((mod != HYBRID_LOG_RULES_APP) &&
11563 (mod != HYBRID_LOG_RULES_SET) &&
11564 (mod != HYBRID_LOG_REQ_COMP))
11569 union tcp_log_stackspecific log;
11572 /* Convert our ms to a microsecond */
11573 memset(&log, 0, sizeof(log));
11574 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
11575 log.u_bbr.flex1 = seq;
11576 log.u_bbr.cwnd_gain = line;
11580 log.u_bbr.flex2 = cur->start_seq;
11581 log.u_bbr.flex3 = cur->end_seq;
11582 log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff);
11583 log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff);
11584 log.u_bbr.flex6 = cur->flags;
11585 log.u_bbr.pkts_out = cur->hybrid_flags;
11586 log.u_bbr.rttProp = cur->timestamp;
11587 log.u_bbr.cur_del_rate = cur->cspr;
11588 log.u_bbr.bw_inuse = cur->start;
11589 log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff);
11590 log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff) ;
11591 log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff);
11592 log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff) ;
11593 log.u_bbr.bbr_state = 1;
11594 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_http_info[0]);
11595 log.u_bbr.use_lt_bw = (uint8_t)(off / sizeof(struct http_sendfile_track));
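/*
 * off is the byte offset of this entry from the base of the
 * t_http_info[] array, so use_lt_bw ends up carrying the slot
 * index of the request being logged.
 */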
11597 log.u_bbr.flex2 = err;
11600 * Fill in flex7 to be CHD (catchup|hybrid|DGP)
11602 log.u_bbr.flex7 = rack->rc_catch_up;
11603 log.u_bbr.flex7 <<= 1;
11604 log.u_bbr.flex7 |= rack->rc_hybrid_mode;
11605 log.u_bbr.flex7 <<= 1;
11606 log.u_bbr.flex7 |= rack->dgp_on;
11607 log.u_bbr.flex8 = mod;
11608 log.u_bbr.delRate = rack->r_ctl.bw_rate_cap;
11609 log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg;
11610 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
11611 log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start;
11612 log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error;
11613 log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop;
11614 tcp_log_event(rack->rc_tp, NULL,
11615 &rack->rc_inp->inp_socket->so_rcv,
11616 &rack->rc_inp->inp_socket->so_snd,
11617 TCP_HYBRID_PACING_LOG, 0,
11618 0, &log, false, NULL, __func__, __LINE__, &tv);
11623 #ifdef TCP_REQUEST_TRK
11625 rack_set_dgp_hybrid_mode(struct tcp_rack *rack, tcp_seq seq, uint32_t len)
11627 struct http_sendfile_track *rc_cur;
11631 rc_cur = tcp_http_find_req_for_seq(rack->rc_tp, seq);
11632 if (rc_cur == NULL) {
11633 /* If not in the beginning what about the end piece */
11634 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_RANGE, __LINE__, err);
11635 rc_cur = tcp_http_find_req_for_seq(rack->rc_tp, (seq + len - 1));
11639 /* If we find no parameters we are in straight DGP mode */
11640 if(rc_cur == NULL) {
11641 /* None found for this seq, just DGP for now */
11642 rack->r_ctl.client_suggested_maxseg = 0;
11643 rack->rc_catch_up = 0;
11644 rack->r_ctl.bw_rate_cap = 0;
11645 rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err);
11646 if (rack->r_ctl.rc_last_sft) {
11647 rack->r_ctl.rc_last_sft = NULL;
11652 * Ok if we have a new entry *or* have never
11653 * set up an entry we need to proceed. If
11654 * we have already set up this entry, we
11655 * just continue along with what we already
11659 if ((rack->r_ctl.rc_last_sft != NULL) &&
11660 (rack->r_ctl.rc_last_sft == rc_cur)) {
11661 /* It's already in place */
11662 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_ISSAME, __LINE__, 0);
11665 if (rack->rc_hybrid_mode == 0) {
11666 rack->r_ctl.rc_last_sft = rc_cur;
11667 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0);
11670 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr){
11671 /* Compensate for all the header overheads */
11672 rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr);
11674 rack->r_ctl.bw_rate_cap = 0;
11675 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS)
11676 rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg;
11678 rack->r_ctl.client_suggested_maxseg = 0;
11679 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) &&
11680 (rc_cur->cspr > 0)) {
11683 rack->rc_catch_up = 1;
11685 * Calculate the deadline time, first set the
11686 * time to when the request arrived.
11688 rc_cur->deadline = rc_cur->localtime;
11690 * Next calculate the length and compensate for
11693 len = rc_cur->end - rc_cur->start;
11694 if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) {
11696 * This session is doing TLS. Take a swag guess
11699 len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len);
11702 * Now considering the size, and the cspr, what is the time that
11703 * would be required at the cspr rate. Here we use the raw
11704 * cspr value since the client only looks at the raw data. We
11705 * do use len which includes TLS overhead, but not the TCP/IP etc.
11706 * That will get made up for in the CU pacing rate set.
11708 len *= HPTS_USEC_IN_SEC;
11709 len /= rc_cur->cspr;
11710 rc_cur->deadline += len;
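/*
 * len now holds the microseconds the client needs to consume this
 * range at the requested rate (assuming cspr is expressed in bytes
 * per second).  Rough example (numbers illustrative): a 1,000,000
 * byte range at a cspr of 2,000,000 bytes/sec adds a 500,000 usec
 * (0.5 second) budget to the deadline.
 */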
11712 rack->rc_catch_up = 0;
11713 rc_cur->deadline = 0;
11715 if (rack->r_ctl.client_suggested_maxseg != 0) {
11717 * We need to reset the max pace segs if we have a
11718 * client_suggested_maxseg.
11720 rack_set_pace_segments(tp, rack, __LINE__, NULL);
11722 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0);
11723 /* Remember it for next time and for CU mode */
11724 rack->r_ctl.rc_last_sft = rc_cur;
11729 rack_chk_http_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts)
11731 #ifdef TCP_REQUEST_TRK
11732 struct http_sendfile_track *ent;
11734 ent = rack->r_ctl.rc_last_sft;
11735 if ((ent == NULL) ||
11736 (ent->flags == TCP_HTTP_TRACK_FLG_EMPTY) ||
11737 (SEQ_GEQ(seq, ent->end_seq))) {
11738 /* Time to update the track. */
11739 rack_set_dgp_hybrid_mode(rack, seq, len);
11740 ent = rack->r_ctl.rc_last_sft;
11746 if (SEQ_LT(ent->end_seq, (seq + len))) {
11748 * This is the case where our end_seq guess
11749 * was wrong. This is usually due to TLS having
11750 * more bytes than our guess. It could also be the
11751 * case that the client sent in two requests closely
11752 * and the SB is full of both so we are sending part
11753 * of each (end|beg). In such a case let's move this
11754 * guy's end to match the end of this send. That
11755 * way it will complete when all of it is acked.
11757 ent->end_seq = (seq + len);
11758 rack_log_hybrid_bw(rack, seq, len, 0, 0, HYBRID_LOG_EXTEND, 0, ent);
11760 /* Now validate we have set the send time of this one */
11761 if ((ent->flags & TCP_HTTP_TRACK_FLG_FSND) == 0) {
11762 ent->flags |= TCP_HTTP_TRACK_FLG_FSND;
11763 ent->first_send = cts;
11764 ent->sent_at_fs = rack->rc_tp->t_sndbytes;
11765 ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes;
11771 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount)
11774 * The fast output path is enabled and we
11775 * have moved the cumack forward. Let's see if
11776 * we can expand forward the fast path length by
11777 * that amount. What we would ideally like to
11778 * do is increase the number of bytes in the
11779 * fast path block (left_to_send) by the
11780 * acked amount. However we have to gate that
11782 * 1) The amount outstanding and the rwnd of the peer
11783 * (i.e. we don't want to exceed the rwnd of the peer).
11785 * 2) The amount of data left in the socket buffer (i.e.
11786 * we can't send beyond what is in the buffer).
11788 * Note that this does not take into account any increase
11789 * in the cwnd. We will only extend the fast path by
11792 uint32_t new_total, gating_val;
11794 new_total = acked_amount + rack->r_ctl.fsb.left_to_send;
11795 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)),
11796 (tp->snd_wnd - (tp->snd_max - tp->snd_una)));
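/*
 * Rough example of the gating (numbers illustrative): with 50k
 * available in the socket buffer, 30k outstanding and a 40k peer
 * window, gating_val = min(50k - 30k, 40k - 30k) = 10k, so
 * left_to_send plus the newly acked amount may only grow to 10k.
 */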
11797 if (new_total <= gating_val) {
11798 /* We can increase left_to_send by the acked amount */
11799 counter_u64_add(rack_extended_rfo, 1);
11800 rack->r_ctl.fsb.left_to_send = new_total;
11801 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))),
11802 ("rack:%p left_to_send:%u sbavail:%u out:%u",
11803 rack, rack->r_ctl.fsb.left_to_send,
11804 sbavail(&rack->rc_inp->inp_socket->so_snd),
11805 (tp->snd_max - tp->snd_una)));
11811 rack_adjust_sendmap_head(struct tcp_rack *rack, struct sockbuf *sb)
11814 * Here any sendmap entry that points to the
11815 * beginning mbuf must be adjusted to the correct
11816 * offset. This must be called with:
11817 * 1) The socket buffer locked
11818 * 2) snd_una adjusted to its new position.
11820 * Note that (2) implies rack_ack_received has also
11821 * been called and all the sbcut's have been done.
11823 * We grab the first mbuf in the socket buffer and
11824 * then go through the front of the sendmap, recalculating
11825 * the stored offset for any sendmap entry that has
11826 * that mbuf. We must use the sb functions to do this
11827 * since it's possible an add was done as well as
11828 * the subtraction we may have just completed. This should
11829 * not be a penalty though, since we just referenced the sb
11830 * to go in and trim off the mbufs that we freed (of course
11831 * there will be a penalty for the sendmap references though).
11833 * Note also with INVARIANTS on, we validate with a KASSERT
11834 * that the first sendmap entry has a soff of 0.
11838 struct rack_sendmap *rsm;
11841 int first_processed = 0;
11844 snd_una = rack->rc_tp->snd_una;
11845 SOCKBUF_LOCK_ASSERT(sb);
11847 rsm = tqhash_min(rack->r_ctl.tqh);
11848 if ((rsm == NULL) || (m == NULL)) {
11849 /* Nothing outstanding */
11852 /* The very first RSM's mbuf must point to the head mbuf in the sb */
11853 KASSERT((rsm->m == m),
11854 ("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb",
11856 while (rsm->m && (rsm->m == m)) {
11857 /* one to adjust */
11862 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff);
11863 if ((rsm->orig_m_len != m->m_len) ||
11864 (rsm->orig_t_space != M_TRAILINGROOM(m))){
11865 rack_adjust_orig_mlen(rsm);
11867 if (first_processed == 0) {
11868 KASSERT((rsm->soff == 0),
11869 ("Rack:%p rsm:%p -- rsm at head but soff not zero",
11871 first_processed = 1;
11873 if ((rsm->soff != soff) || (rsm->m != tm)) {
11875 * This is not a fatal error, we anticipate it
11876 * might happen (the else code), so we count it here
11877 * so that under invariant we can see that it really
11880 counter_u64_add(rack_adjust_map_bw, 1);
11885 rsm->orig_m_len = rsm->m->m_len;
11886 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
11888 rsm->orig_m_len = 0;
11889 rsm->orig_t_space = 0;
11892 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff);
11894 rsm->orig_m_len = rsm->m->m_len;
11895 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
11897 rsm->orig_m_len = 0;
11898 rsm->orig_t_space = 0;
11901 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
11907 #ifdef TCP_REQUEST_TRK
11909 rack_http_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack)
11911 struct http_sendfile_track *ent;
11914 if ((rack->rc_hybrid_mode == 0) &&
11915 (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) {
11917 * Just do normal completions, hybrid pacing is not on
11918 * and CLDL is off as well.
11920 tcp_http_check_for_comp(rack->rc_tp, th_ack);
11924 * Originally I was just going to find the th_ack associated
11925 * with an entry. But then I realized a large stretch ack could
11926 * in theory ack two or more requests at once. So instead we
11927 * need to find all entries that are completed by th_ack, not
11928 * just a single entry and do our logging.
11930 ent = tcp_http_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i);
11931 while (ent != NULL) {
11933 * We may be doing hybrid pacing or CLDL and need more details possibly
11934 * so we do it manually instead of calling
11935 * tcp_http_check_for_comp()
11937 uint64_t laa, tim, data, cbw, ftim;
11939 /* Ok this ack frees it */
11940 rack_log_hybrid(rack, th_ack,
11941 ent, HYBRID_LOG_REQ_COMP, __LINE__, 0);
11942 /* calculate the time based on the ack arrival */
11943 data = ent->end - ent->start;
11944 laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time);
11945 if (ent->flags & TCP_HTTP_TRACK_FLG_FSND) {
11946 if (ent->first_send > ent->localtime)
11947 ftim = ent->first_send;
11949 ftim = ent->localtime;
11952 ftim = ent->localtime;
11954 if (laa > ent->localtime)
11958 cbw = data * HPTS_USEC_IN_SEC;
11963 rack_log_hybrid_bw(rack, th_ack, cbw, tim, data, HYBRID_LOG_BW_MEASURE, 0, ent);
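/*
 * cbw is the client's achieved goodput for this request in bytes
 * per second: the bytes in the range scaled by HPTS_USEC_IN_SEC and
 * divided by the elapsed time (tim) in microseconds.  Rough example
 * (numbers illustrative): 2,000,000 bytes delivered over 1,000,000
 * usec works out to 2,000,000 bytes/sec.
 */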
11965 * Check to see if we are freeing what we are pointing to send-wise;
11966 * if so, be sure to NULL the pointer so we know we are no longer
11969 if (ent == rack->r_ctl.rc_last_sft)
11970 rack->r_ctl.rc_last_sft = NULL;
11971 /* Generate the log that the tcp_netflix call would have */
11972 tcp_http_log_req_info(rack->rc_tp, ent,
11973 i, TCP_HTTP_REQ_LOG_FREED, 0, 0);
11974 /* Free it and see if there is another one */
11975 tcp_http_free_a_slot(rack->rc_tp, ent);
11976 ent = tcp_http_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i);
11983 * Return value of 1, we do not need to call rack_process_data().
11984 * return value of 0, rack_process_data can be called.
11985 * For ret_val: if it's 0 the TCP is locked, if it's non-zero
11986 * it's unlocked and probably unsafe to touch the TCB.
11989 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
11990 struct tcpcb *tp, struct tcpopt *to,
11991 uint32_t tiwin, int32_t tlen,
11992 int32_t * ofia, int32_t thflags, int32_t *ret_val)
11994 int32_t ourfinisacked = 0;
11995 int32_t nsegs, acked_amount;
11997 struct mbuf *mfree;
11998 struct tcp_rack *rack;
11999 int32_t under_pacing = 0;
12000 int32_t recovery = 0;
12002 INP_WLOCK_ASSERT(tptoinpcb(tp));
12004 rack = (struct tcp_rack *)tp->t_fb_ptr;
12005 if (SEQ_GT(th->th_ack, tp->snd_max)) {
12006 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val,
12007 &rack->r_ctl.challenge_ack_ts,
12008 &rack->r_ctl.challenge_ack_cnt);
12009 rack->r_wanted_output = 1;
12012 if (rack->gp_ready &&
12013 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
12016 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
12017 int in_rec, dup_ack_struck = 0;
12018 int dsack_seen = 0, sacks_seen = 0;
12020 in_rec = IN_FASTRECOVERY(tp->t_flags);
12021 if (rack->rc_in_persist) {
12022 tp->t_rxtshift = 0;
12023 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
12024 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
12027 if ((th->th_ack == tp->snd_una) &&
12028 (tiwin == tp->snd_wnd) &&
12029 ((to->to_flags & TOF_SACK) == 0)) {
12030 rack_strike_dupack(rack);
12031 dup_ack_struck = 1;
12033 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)),
12034 dup_ack_struck, &dsack_seen, &sacks_seen);
12035 if ((rack->sack_attack_disable > 0) &&
12036 (th->th_ack == tp->snd_una) &&
12037 (tiwin == tp->snd_wnd) &&
12038 (dsack_seen == 0) &&
12039 (sacks_seen > 0)) {
12041 * If sacks have been disabled we may
12042 * want to strike a dup-ack "ignoring" the
12043 * sack as long as the sack was not a "dsack". Note
12044 * that if no sack is sent (TOF_SACK is off) then the
12045 * normal dsack code above rack_log_ack() would have
12046 * already struck. So this is just to catch the case
12047 * where we are ignoring sacks from this guy due to
12048 * it being a suspected attacker.
12050 rack_strike_dupack(rack);
12054 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
12056 * Old ack, behind (or duplicate to) the last one rcv'd
12057 * Note: We mark reordering as occurring if the ack is
12058 * less than snd_una and we have not closed our window.
12060 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) {
12061 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
12062 if (rack->r_ctl.rc_reorder_ts == 0)
12063 rack->r_ctl.rc_reorder_ts = 1;
12068 * If we reach this point, ACK is not a duplicate, i.e., it ACKs
12069 * something we sent.
12071 if (tp->t_flags & TF_NEEDSYN) {
12073 * T/TCP: Connection was half-synchronized, and our SYN has
12074 * been ACK'd (so connection is now fully synchronized). Go
12075 * to non-starred state, increment snd_una for ACK of SYN,
12076 * and check if we can do window scaling.
12078 tp->t_flags &= ~TF_NEEDSYN;
12080 /* Do window scaling? */
12081 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
12082 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
12083 tp->rcv_scale = tp->request_r_scale;
12084 /* Send window already scaled. */
12087 nsegs = max(1, m->m_pkthdr.lro_nsegs);
12089 acked = BYTES_THIS_ACK(tp, th);
12092 * Any time we move the cum-ack forward, clear the
12093 * keep-alive tied probe-not-answered flag. The
12094 * persists code clears its own on entry.
12096 rack->probe_not_answered = 0;
12098 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
12099 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
12101 * If we just performed our first retransmit, and the ACK arrives
12102 * within our recovery window, then it was a mistake to do the
12103 * retransmit in the first place. Recover our original cwnd and
12104 * ssthresh, and proceed to transmit where we left off.
12106 if ((tp->t_flags & TF_PREVVALID) &&
12107 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
12108 tp->t_flags &= ~TF_PREVVALID;
12109 if (tp->t_rxtshift == 1 &&
12110 (int)(ticks - tp->t_badrxtwin) < 0)
12111 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__);
12114 /* assure we are not backed off */
12115 tp->t_rxtshift = 0;
12116 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
12117 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
12118 rack->rc_tlp_in_progress = 0;
12119 rack->r_ctl.rc_tlp_cnt_out = 0;
12121 * If it is the RXT timer we want to
12122 * stop it, so we can restart a TLP.
12124 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
12125 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
12126 #ifdef TCP_REQUEST_TRK
12127 rack_http_check_for_comp(rack, th->th_ack);
12131 * If we have a timestamp reply, update smoothed round trip time. If
12132 * no timestamp is present but transmit timer is running and timed
12133 * sequence number was acked, update smoothed round trip time. Since
12134 * we now have an rtt measurement, cancel the timer backoff (cf.,
12135 * Phil Karn's retransmit alg.). Recompute the initial retransmit
12138 * Some boxes send broken timestamp replies during the SYN+ACK
12139 * phase, ignore timestamps of 0 or we could calculate a huge RTT
12140 * and blow up the retransmit timer.
12143 * If all outstanding data is acked, stop retransmit timer and
12144 * remember to restart (more output or persist). If there is more
12145 * data to be acked, restart retransmit timer, using current
12146 * (possibly backed-off) value.
12150 *ofia = ourfinisacked;
12153 if (IN_RECOVERY(tp->t_flags)) {
12154 if (SEQ_LT(th->th_ack, tp->snd_recover) &&
12155 (SEQ_LT(th->th_ack, tp->snd_max))) {
12156 tcp_rack_partialack(tp);
12158 rack_post_recovery(tp, th->th_ack);
12163 * Let the congestion control algorithm update congestion control
12164 * related information. This typically means increasing the
12165 * congestion window.
12167 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery);
12168 SOCKBUF_LOCK(&so->so_snd);
12169 acked_amount = min(acked, (int)sbavail(&so->so_snd));
12170 tp->snd_wnd -= acked_amount;
12171 mfree = sbcut_locked(&so->so_snd, acked_amount);
12172 if ((sbused(&so->so_snd) == 0) &&
12173 (acked > acked_amount) &&
12174 (tp->t_state >= TCPS_FIN_WAIT_1) &&
12175 (tp->t_flags & TF_SENTFIN)) {
12177 * We must be sure our fin
12178 * was sent and acked (we can be
12179 * in FIN_WAIT_1 without having
12184 tp->snd_una = th->th_ack;
12186 if (acked_amount && sbavail(&so->so_snd))
12187 rack_adjust_sendmap_head(rack, &so->so_snd);
12188 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
12189 /* NB: sowwakeup_locked() does an implicit unlock. */
12190 sowwakeup_locked(so);
12191 /* now check the rxt clamps */
12192 if ((recovery == 1) &&
12193 (rack->excess_rxt_on) &&
12194 (rack->r_cwnd_was_clamped == 0)) {
12195 do_rack_excess_rxt(tp, rack);
12196 } else if (rack->r_cwnd_was_clamped)
12197 do_rack_check_for_unclamp(tp, rack);
12199 if (SEQ_GT(tp->snd_una, tp->snd_recover))
12200 tp->snd_recover = tp->snd_una;
12202 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
12203 tp->snd_nxt = tp->snd_una;
12205 if (under_pacing &&
12206 (rack->use_fixed_rate == 0) &&
12207 (rack->in_probe_rtt == 0) &&
12208 rack->rc_gp_dyn_mul &&
12209 rack->rc_always_pace) {
12210 /* Check if we are dragging bottom */
12211 rack_check_bottom_drag(tp, rack, so);
12213 if (tp->snd_una == tp->snd_max) {
12214 /* Nothing left outstanding */
12215 tp->t_flags &= ~TF_PREVVALID;
12216 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
12217 rack->r_ctl.retran_during_recovery = 0;
12218 rack->r_ctl.dsack_byte_cnt = 0;
12219 if (rack->r_ctl.rc_went_idle_time == 0)
12220 rack->r_ctl.rc_went_idle_time = 1;
12221 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
12222 if (sbavail(&tptosocket(tp)->so_snd) == 0)
12224 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
12225 rack->rc_suspicious = 0;
12226 /* Set need output so persist might get set */
12227 rack->r_wanted_output = 1;
12228 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
12229 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
12230 (sbavail(&so->so_snd) == 0) &&
12231 (tp->t_flags2 & TF2_DROP_AF_DATA)) {
12233 * The socket was gone and the
12234 * peer sent data (now or in the past), time to
12238 /* tcp_close will kill the inp, pre-log the Reset */
12239 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
12240 tp = tcp_close(tp);
12241 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
12246 *ofia = ourfinisacked;
12252 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line,
12253 int dir, uint32_t flags, struct rack_sendmap *rsm)
12255 if (tcp_bblogging_on(rack->rc_tp)) {
12256 union tcp_log_stackspecific log;
12259 memset(&log, 0, sizeof(log));
12260 log.u_bbr.flex1 = cnt;
12261 log.u_bbr.flex2 = split;
12262 log.u_bbr.flex3 = out;
12263 log.u_bbr.flex4 = line;
12264 log.u_bbr.flex5 = rack->r_must_retran;
12265 log.u_bbr.flex6 = flags;
12266 log.u_bbr.flex7 = rack->rc_has_collapsed;
12267 log.u_bbr.flex8 = dir; /*
12268 * 1 is collapsed, 0 is uncollapsed,
12269 * 2 is log of a rsm being marked, 3 is a split.
12272 log.u_bbr.rttProp = 0;
12274 log.u_bbr.rttProp = (uint64_t)rsm;
12275 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
12276 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
12277 TCP_LOG_EVENTP(rack->rc_tp, NULL,
12278 &rack->rc_inp->inp_socket->so_rcv,
12279 &rack->rc_inp->inp_socket->so_snd,
12280 TCP_RACK_LOG_COLLAPSE, 0,
12281 0, &log, false, &tv);
12286 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, tcp_seq th_ack, int line)
12289 * Here all we do is mark the collapsed point and set the flag.
12290 * This may happen again and again, but there is no
12291 * sense splitting our map until we know where the
12292 * peer finally lands in the collapse.
12294 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND);
12295 if ((rack->rc_has_collapsed == 0) ||
12296 (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd)))
12297 counter_u64_add(rack_collapsed_win_seen, 1);
12298 rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd;
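/*
 * Rough example (numbers illustrative): if the peer acks up to
 * sequence 1000 and now advertises a 500 byte window while we have
 * 3000 bytes outstanding, last_collapse_point becomes 1500 and
 * everything we previously sent from 1500 up to snd_max now sits
 * beyond the collapsed window.
 */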
12299 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max;
12300 rack->rc_has_collapsed = 1;
12301 rack->r_collapse_point_valid = 1;
12302 rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL);
12306 rack_un_collapse_window(struct tcp_rack *rack, int line)
12308 struct rack_sendmap *nrsm, *rsm;
12309 int cnt = 0, split = 0;
12310 int insret __diagused;
12313 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND);
12314 rack->rc_has_collapsed = 0;
12315 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point);
12317 /* Nothing to do, maybe the peer ack'ed it all */
12318 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL);
12321 /* Now do we need to split this one? */
12322 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) {
12323 rack_log_collapse(rack, rsm->r_start, rsm->r_end,
12324 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm);
12325 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
12326 if (nrsm == NULL) {
12327 /* We can't get a rsm, mark all? */
12333 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point);
12335 (void)tqhash_insert(rack->r_ctl.tqh, nrsm);
12337 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
12338 panic("Insert in rb tree of %p fails ret:%d rack:%p rsm:%p",
12339 nrsm, insret, rack, rsm);
12342 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT,
12343 rack->r_ctl.last_collapse_point, __LINE__);
12344 if (rsm->r_in_tmap) {
12345 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
12346 nrsm->r_in_tmap = 1;
12349 * Set in the new RSM as the
12350 * collapsed starting point
12356 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) {
12358 nrsm->r_flags |= RACK_RWND_COLLAPSED;
12359 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm);
12363 counter_u64_add(rack_collapsed_win, 1);
12365 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL);
12369 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack,
12370 int32_t tlen, int32_t tfo_syn)
12372 if (DELAY_ACK(tp, tlen) || tfo_syn) {
12373 rack_timer_cancel(tp, rack,
12374 rack->r_ctl.rc_rcvtime, __LINE__);
12375 tp->t_flags |= TF_DELACK;
12377 rack->r_wanted_output = 1;
12378 tp->t_flags |= TF_ACKNOW;
12383 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack)
12386 * If fast output is in progress, let's validate that
12387 * the new window did not shrink on us; if it did,
12388 * fast output should end.
12390 if (rack->r_fast_output) {
12394 * Calculate what we will send if left as is
12395 * and compare that to our send window.
12397 out = ctf_outstanding(tp);
12398 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) {
12399 /* ok we have an issue */
12400 if (out >= tp->snd_wnd) {
12401 /* Turn off fast output, the window is met or collapsed */
12402 rack->r_fast_output = 0;
12404 /* we have some room left */
12405 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out;
12406 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) {
12407 /* If not at least 1 full segment never mind */
12408 rack->r_fast_output = 0;
12417 * Return value of 1, the TCB is unlocked and most
12418 * likely gone, return value of 0, the TCP is still
12422 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
12423 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
12424 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
12427 * Update window information. Don't look at window if no ACK: TAC's
12428 * send garbage on first SYN.
12432 struct tcp_rack *rack;
12434 INP_WLOCK_ASSERT(tptoinpcb(tp));
12436 rack = (struct tcp_rack *)tp->t_fb_ptr;
12437 nsegs = max(1, m->m_pkthdr.lro_nsegs);
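/*
 * Standard window-update acceptance rule: take the advertised window
 * only from a segment at least as new as the one that last updated
 * it -- a newer sequence, or the same sequence with a newer ack, or
 * the same sequence and ack but a larger window.
 */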
12438 if ((thflags & TH_ACK) &&
12439 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
12440 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
12441 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
12442 /* keep track of pure window updates */
12444 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
12445 KMOD_TCPSTAT_INC(tcps_rcvwinupd);
12446 tp->snd_wnd = tiwin;
12447 rack_validate_fo_sendwin_up(tp, rack);
12448 tp->snd_wl1 = th->th_seq;
12449 tp->snd_wl2 = th->th_ack;
12450 if (tp->snd_wnd > tp->max_sndwnd)
12451 tp->max_sndwnd = tp->snd_wnd;
12452 rack->r_wanted_output = 1;
12453 } else if (thflags & TH_ACK) {
12454 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) {
12455 tp->snd_wnd = tiwin;
12456 rack_validate_fo_sendwin_up(tp, rack);
12457 tp->snd_wl1 = th->th_seq;
12458 tp->snd_wl2 = th->th_ack;
12461 if (tp->snd_wnd < ctf_outstanding(tp))
12462 /* The peer collapsed the window */
12463 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__);
12464 else if (rack->rc_has_collapsed)
12465 rack_un_collapse_window(rack, __LINE__);
12466 if ((rack->r_collapse_point_valid) &&
12467 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point)))
12468 rack->r_collapse_point_valid = 0;
12469 /* Was persist timer active and now we have window space? */
12470 if ((rack->rc_in_persist != 0) &&
12471 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
12472 rack->r_ctl.rc_pace_min_segs))) {
12473 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime);
12474 tp->snd_nxt = tp->snd_max;
12475 /* Make sure we output to start the timer */
12476 rack->r_wanted_output = 1;
12478 /* Do we enter persists? */
12479 if ((rack->rc_in_persist == 0) &&
12480 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
12481 TCPS_HAVEESTABLISHED(tp->t_state) &&
12482 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
12483 sbavail(&tptosocket(tp)->so_snd) &&
12484 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
12486 * Here the rwnd is less than
12487 * the pacing size, we are established,
12488 * nothing is outstanding, and there is
12489 * data to send. Enter persists.
12491 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una);
12493 if (tp->t_flags2 & TF2_DROP_AF_DATA) {
12498 * don't process the URG bit, ignore them drag
12501 tp->rcv_up = tp->rcv_nxt;
12504 * Process the segment text, merging it into the TCP sequencing
12505 * queue, and arranging for acknowledgment of receipt if necessary.
12506 * This process logically involves adjusting tp->rcv_wnd as data is
12507 * presented to the user (this happens in tcp_usrreq.c, case
12508 * PRU_RCVD). If a FIN has already been received on this connection
12509 * then we just ignore the text.
12511 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
12512 IS_FASTOPEN(tp->t_flags));
12513 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) &&
12514 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
12515 tcp_seq save_start = th->th_seq;
12516 tcp_seq save_rnxt = tp->rcv_nxt;
12517 int save_tlen = tlen;
12519 m_adj(m, drop_hdrlen); /* delayed header drop */
12521 * Insert segment which includes th into TCP reassembly
12522 * queue with control block tp. Set thflags to whether
12523 * reassembly now includes a segment with FIN. This handles
12524 * the common case inline (segment is the next to be
12525 * received on an established connection, and the queue is
12526 * empty), avoiding linkage into and removal from the queue
12527 * and repetition of various conversions. Set DELACK for
12528 * segments received in order, but ack immediately when
12529 * segments are out of order (so fast retransmit can work).
12531 if (th->th_seq == tp->rcv_nxt &&
12533 (TCPS_HAVEESTABLISHED(tp->t_state) ||
12535 #ifdef NETFLIX_SB_LIMITS
12536 u_int mcnt, appended;
12538 if (so->so_rcv.sb_shlim) {
12539 mcnt = m_memcnt(m);
12541 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
12542 CFO_NOSLEEP, NULL) == false) {
12543 counter_u64_add(tcp_sb_shlim_fails, 1);
12549 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn);
12550 tp->rcv_nxt += tlen;
12552 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
12553 (tp->t_fbyte_in == 0)) {
12554 tp->t_fbyte_in = ticks;
12555 if (tp->t_fbyte_in == 0)
12556 tp->t_fbyte_in = 1;
12557 if (tp->t_fbyte_out && tp->t_fbyte_in)
12558 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
12560 thflags = tcp_get_flags(th) & TH_FIN;
12561 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
12562 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
12563 SOCKBUF_LOCK(&so->so_rcv);
12564 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
12567 #ifdef NETFLIX_SB_LIMITS
12570 sbappendstream_locked(&so->so_rcv, m, 0);
12572 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1);
12573 /* NB: sorwakeup_locked() does an implicit unlock. */
12574 sorwakeup_locked(so);
12575 #ifdef NETFLIX_SB_LIMITS
12576 if (so->so_rcv.sb_shlim && appended != mcnt)
12577 counter_fo_release(so->so_rcv.sb_shlim,
12582 * XXX: Due to the header drop above "th" is
12583 * theoretically invalid by now. Fortunately
12584 * m_adj() doesn't actually free any mbufs when
12585 * trimming from the head.
12587 tcp_seq temp = save_start;
12589 thflags = tcp_reass(tp, th, &temp, &tlen, m);
12590 tp->t_flags |= TF_ACKNOW;
12591 if (tp->t_flags & TF_WAKESOR) {
12592 tp->t_flags &= ~TF_WAKESOR;
12593 /* NB: sorwakeup_locked() does an implicit unlock. */
12594 sorwakeup_locked(so);
12597 if ((tp->t_flags & TF_SACK_PERMIT) &&
12599 TCPS_HAVEESTABLISHED(tp->t_state)) {
12600 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
12602 * DSACK actually handled in the fastpath
12605 tcp_update_sack_list(tp, save_start,
12606 save_start + save_tlen);
12607 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
12608 if ((tp->rcv_numsacks >= 1) &&
12609 (tp->sackblks[0].end == save_start)) {
12611 * Partial overlap, recorded at todrop
12614 tcp_update_sack_list(tp,
12615 tp->sackblks[0].start,
12616 tp->sackblks[0].end);
12618 tcp_update_dsack_list(tp, save_start,
12619 save_start + save_tlen);
12621 } else if (tlen >= save_tlen) {
12622 /* Update of sackblks. */
12623 tcp_update_dsack_list(tp, save_start,
12624 save_start + save_tlen);
12625 } else if (tlen > 0) {
12626 tcp_update_dsack_list(tp, save_start,
12627 save_start + tlen);
12632 thflags &= ~TH_FIN;
12636 * If FIN is received ACK the FIN and let the user know that the
12637 * connection is closing.
12639 if (thflags & TH_FIN) {
12640 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
12641 /* The socket upcall is handled by socantrcvmore. */
12644 * If connection is half-synchronized (ie NEEDSYN
12645 * flag on) then delay ACK, so it may be piggybacked
12646 * when SYN is sent. Otherwise, since we received a
12647 * FIN then no more input can be expected, send ACK
12650 if (tp->t_flags & TF_NEEDSYN) {
12651 rack_timer_cancel(tp, rack,
12652 rack->r_ctl.rc_rcvtime, __LINE__);
12653 tp->t_flags |= TF_DELACK;
12655 tp->t_flags |= TF_ACKNOW;
12659 switch (tp->t_state) {
12661 * In SYN_RECEIVED and ESTABLISHED STATES enter the
12662 * CLOSE_WAIT state.
12664 case TCPS_SYN_RECEIVED:
12665 tp->t_starttime = ticks;
12667 case TCPS_ESTABLISHED:
12668 rack_timer_cancel(tp, rack,
12669 rack->r_ctl.rc_rcvtime, __LINE__);
12670 tcp_state_change(tp, TCPS_CLOSE_WAIT);
12674 * If still in FIN_WAIT_1 STATE FIN has not been
12675 * acked so enter the CLOSING state.
12677 case TCPS_FIN_WAIT_1:
12678 rack_timer_cancel(tp, rack,
12679 rack->r_ctl.rc_rcvtime, __LINE__);
12680 tcp_state_change(tp, TCPS_CLOSING);
12684 * In FIN_WAIT_2 state enter the TIME_WAIT state,
12685 * starting the time-wait timer, turning off the
12686 * other standard timers.
12688 case TCPS_FIN_WAIT_2:
12689 rack_timer_cancel(tp, rack,
12690 rack->r_ctl.rc_rcvtime, __LINE__);
12696 * Return any desired output.
12698 if ((tp->t_flags & TF_ACKNOW) ||
12699 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) {
12700 rack->r_wanted_output = 1;
12706 * Here nothing is really faster, it's just that we
12707 * have broken out the fast-data path as well, just like the fast-ack.
12711 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so,
12712 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
12713 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos)
12716 int32_t newsize = 0; /* automatic sockbuf scaling */
12717 struct tcp_rack *rack;
12718 #ifdef NETFLIX_SB_LIMITS
12719 u_int mcnt, appended;
12723 * If last ACK falls within this segment's sequence numbers, record
12724 * the timestamp. NOTE that the test is modified according to the
12725 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
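/*
 * The __predict_false() tests below are the qualifiers for the fast
 * new-data path: if the segment is not the next expected sequence, we
 * are retransmitting, the advertised window changed, a hidden
 * NEEDSYN/NEEDFIN flag is set, the timestamp fails PAWS, the ACK is
 * not exactly snd_una, or the data will not fit in the receive
 * buffer, we bail out and leave the segment to the full input path.
 */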
12727 if (__predict_false(th->th_seq != tp->rcv_nxt)) {
12730 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
12733 if (tiwin && tiwin != tp->snd_wnd) {
12736 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) {
12739 if (__predict_false((to->to_flags & TOF_TS) &&
12740 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) {
12743 if (__predict_false((th->th_ack != tp->snd_una))) {
12746 if (__predict_false(tlen > sbspace(&so->so_rcv))) {
12749 if ((to->to_flags & TOF_TS) != 0 &&
12750 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
12751 tp->ts_recent_age = tcp_ts_getticks();
12752 tp->ts_recent = to->to_tsval;
12754 rack = (struct tcp_rack *)tp->t_fb_ptr;
12756 * This is a pure, in-sequence data packet with nothing on the
12757 * reassembly queue and we have enough buffer space to take it.
12759 nsegs = max(1, m->m_pkthdr.lro_nsegs);
12761 #ifdef NETFLIX_SB_LIMITS
12762 if (so->so_rcv.sb_shlim) {
12763 mcnt = m_memcnt(m);
12765 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
12766 CFO_NOSLEEP, NULL) == false) {
12767 counter_u64_add(tcp_sb_shlim_fails, 1);
12773 /* Clean receiver SACK report if present */
12774 if (tp->rcv_numsacks)
12775 tcp_clean_sackreport(tp);
12776 KMOD_TCPSTAT_INC(tcps_preddat);
12777 tp->rcv_nxt += tlen;
12779 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
12780 (tp->t_fbyte_in == 0)) {
12781 tp->t_fbyte_in = ticks;
12782 if (tp->t_fbyte_in == 0)
12783 tp->t_fbyte_in = 1;
12784 if (tp->t_fbyte_out && tp->t_fbyte_in)
12785 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
12788 * Pull snd_wl1 up to prevent seq wrap relative to th_seq.
12790 tp->snd_wl1 = th->th_seq;
12792 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt.
12794 tp->rcv_up = tp->rcv_nxt;
12795 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
12796 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
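/*
 * tcp_autorcvbuf() returns a suggested (larger) receive buffer size
 * when the arrival rate justifies growth; if it does, the buffer is
 * resized below before the data is appended, and auto-sizing is
 * turned off once sbreserve_locked() refuses the request.
 */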
12797 newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
12799 /* Add data to socket buffer. */
12800 SOCKBUF_LOCK(&so->so_rcv);
12801 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
12805 * Set new socket buffer size. Give up when limit is
12809 if (!sbreserve_locked(so, SO_RCV, newsize, NULL))
12810 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
12811 m_adj(m, drop_hdrlen); /* delayed header drop */
12812 #ifdef NETFLIX_SB_LIMITS
12815 sbappendstream_locked(&so->so_rcv, m, 0);
12816 ctf_calc_rwin(so, tp);
12818 rack_log_wakeup(tp, rack, &so->so_rcv, tlen, 1);
12819 /* NB: sorwakeup_locked() does an implicit unlock. */
12820 sorwakeup_locked(so);
12821 #ifdef NETFLIX_SB_LIMITS
12822 if (so->so_rcv.sb_shlim && mcnt != appended)
12823 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended);
12825 rack_handle_delayed_ack(tp, rack, tlen, 0);
12826 if (tp->snd_una == tp->snd_max)
12827 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
12832 * This subfunction is used to try to highly optimize the
12833 * fast path. We again allow window updates that are
12834 * in sequence to remain in the fast-path. We also add
12835 * in the __predict's to attempt to help the compiler.
12836 * Note that if we return a 0, then we can *not* process
12837 * it and the caller should push the packet into the
12841 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
12842 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
12843 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts)
12847 int32_t under_pacing = 0;
12848 struct tcp_rack *rack;
12850 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
12851 /* Old ack, behind (or duplicate to) the last one rcv'd */
12854 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) {
12855 /* Above what we have sent? */
12858 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
12859 /* We are retransmitting */
12862 if (__predict_false(tiwin == 0)) {
12866 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) {
12867 /* We need a SYN or a FIN, unlikely.. */
12870 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) {
12871 /* Timestamp is behind .. old ack with seq wrap? */
12874 if (__predict_false(IN_RECOVERY(tp->t_flags))) {
12875 /* Still recovering */
12878 rack = (struct tcp_rack *)tp->t_fb_ptr;
12879 if (rack->r_ctl.rc_sacked) {
12880 /* We have sack holes on our scoreboard */
12883 /* Ok if we reach here, we can process a fast-ack */
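/*
 * From here the fast-ack work mirrors the slow path in miniature:
 * log the ack, absorb any window update, handle persist entry/exit,
 * record the timestamp, run the established-input hooks, cut the
 * newly acked bytes from the send buffer, reset the retransmit shift
 * and timers, and finally decide whether more output is wanted.
 */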
12884 if (rack->gp_ready &&
12885 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
12888 nsegs = max(1, m->m_pkthdr.lro_nsegs);
12889 rack_log_ack(tp, to, th, 0, 0, NULL, NULL);
12890 /* Did the window get updated? */
12891 if (tiwin != tp->snd_wnd) {
12892 tp->snd_wnd = tiwin;
12893 rack_validate_fo_sendwin_up(tp, rack);
12894 tp->snd_wl1 = th->th_seq;
12895 if (tp->snd_wnd > tp->max_sndwnd)
12896 tp->max_sndwnd = tp->snd_wnd;
12898 /* Do we exit persists? */
12899 if ((rack->rc_in_persist != 0) &&
12900 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
12901 rack->r_ctl.rc_pace_min_segs))) {
12902 rack_exit_persist(tp, rack, cts);
12904 /* Do we enter persists? */
12905 if ((rack->rc_in_persist == 0) &&
12906 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
12907 TCPS_HAVEESTABLISHED(tp->t_state) &&
12908 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
12909 sbavail(&tptosocket(tp)->so_snd) &&
12910 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
12912 * Here the rwnd is less than
12913 * the pacing size, we are established,
12914 * nothing is outstanding, and there is
12915 * data to send. Enter persists.
12917 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack);
12920 * If last ACK falls within this segment's sequence numbers, record
12921 * the timestamp. NOTE that the test is modified according to the
12922 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
12924 if ((to->to_flags & TOF_TS) != 0 &&
12925 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
12926 tp->ts_recent_age = tcp_ts_getticks();
12927 tp->ts_recent = to->to_tsval;
12930 * This is a pure ack for outstanding data.
12932 KMOD_TCPSTAT_INC(tcps_predack);
12935 * "bad retransmit" recovery.
12937 if ((tp->t_flags & TF_PREVVALID) &&
12938 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
12939 tp->t_flags &= ~TF_PREVVALID;
12940 if (tp->t_rxtshift == 1 &&
12941 (int)(ticks - tp->t_badrxtwin) < 0)
12942 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__);
12945 * Recalculate the transmit timer / rtt.
12947 * Some boxes send broken timestamp replies during the SYN+ACK
12948 * phase, ignore timestamps of 0 or we could calculate a huge RTT
12949 * and blow up the retransmit timer.
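/* BYTES_THIS_ACK(tp, th) is simply (th->th_ack - tp->snd_una), the count of newly acked bytes. */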
12951 acked = BYTES_THIS_ACK(tp, th);
12954 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
12955 hhook_run_tcp_est_in(tp, th, to);
12957 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
12958 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
12960 struct mbuf *mfree;
12962 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0);
12963 SOCKBUF_LOCK(&so->so_snd);
12964 mfree = sbcut_locked(&so->so_snd, acked);
12965 tp->snd_una = th->th_ack;
12966 /* Note we want to hold the sb lock through the sendmap adjust */
12967 rack_adjust_sendmap_head(rack, &so->so_snd);
12968 /* Wake up the socket if we have room to write more */
12969 rack_log_wakeup(tp, rack, &so->so_snd, acked, 2);
12970 sowwakeup_locked(so);
12972 tp->t_rxtshift = 0;
12973 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
12974 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
12975 rack->rc_tlp_in_progress = 0;
12976 rack->r_ctl.rc_tlp_cnt_out = 0;
12978 * If it is the RXT timer we want to
12979 * stop it, so we can restart a TLP.
12981 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
12982 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
12984 #ifdef TCP_REQUEST_TRK
12985 rack_http_check_for_comp(rack, th->th_ack);
12989 * Let the congestion control algorithm update congestion control
12990 * related information. This typically means increasing the
12991 * congestion window.
12993 if (tp->snd_wnd < ctf_outstanding(tp)) {
12994 /* The peer collapsed the window */
12995 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__);
12996 } else if (rack->rc_has_collapsed)
12997 rack_un_collapse_window(rack, __LINE__);
12998 if ((rack->r_collapse_point_valid) &&
12999 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point)))
13000 rack->r_collapse_point_valid = 0;
13002 * Pull snd_wl2 up to prevent seq wrap relative to th_ack.
13004 tp->snd_wl2 = th->th_ack;
13007 /* ND6_HINT(tp); *//* Some progress has been made. */
13010 * If all outstanding data are acked, stop retransmit timer,
13011 * otherwise restart timer using current (possibly backed-off)
13012 * value. If process is waiting for space, wakeup/selwakeup/signal.
13013 * If data are ready to send, let tcp_output decide between more
13014 * output or persist.
13016 if (under_pacing &&
13017 (rack->use_fixed_rate == 0) &&
13018 (rack->in_probe_rtt == 0) &&
13019 rack->rc_gp_dyn_mul &&
13020 rack->rc_always_pace) {
13021 /* Check if we are dragging bottom */
13022 rack_check_bottom_drag(tp, rack, so);
13024 if (tp->snd_una == tp->snd_max) {
13025 tp->t_flags &= ~TF_PREVVALID;
13026 rack->r_ctl.retran_during_recovery = 0;
13027 rack->rc_suspicious = 0;
13028 rack->r_ctl.dsack_byte_cnt = 0;
13029 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
13030 if (rack->r_ctl.rc_went_idle_time == 0)
13031 rack->r_ctl.rc_went_idle_time = 1;
13032 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
13033 if (sbavail(&tptosocket(tp)->so_snd) == 0)
13035 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
13037 if (acked && rack->r_fast_output)
13038 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked);
13039 if (sbavail(&so->so_snd)) {
13040 rack->r_wanted_output = 1;
13046 * Return value of 1, the TCB is unlocked and most
13047 * likely gone, return value of 0, the TCP is still
13051 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
13052 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
13053 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
13055 int32_t ret_val = 0;
13057 int32_t ourfinisacked = 0;
13058 struct tcp_rack *rack;
13060 INP_WLOCK_ASSERT(tptoinpcb(tp));
13062 ctf_calc_rwin(so, tp);
13064 * If the state is SYN_SENT: if seg contains an ACK, but not for our
13065 * SYN, drop the input. if seg contains a RST, then drop the
13066 * connection. if seg does not contain SYN, then drop it. Otherwise
13067 * this is an acceptable SYN segment initialize tp->rcv_nxt and
13068 * tp->irs if seg contains ack then advance tp->snd_una if seg
13069 * contains an ECE and ECN support is enabled, the stream is ECN
13070 * capable. if SYN has been acked change to ESTABLISHED else
13071 * SYN_RCVD state arrange for segment to be acked (eventually)
13072 * continue processing rest of data/controls.
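/*
 * First reject an ACK that could not possibly cover our SYN: anything
 * at or below the ISS, or beyond snd_max, earns a reset.
 */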
13074 if ((thflags & TH_ACK) &&
13075 (SEQ_LEQ(th->th_ack, tp->iss) ||
13076 SEQ_GT(th->th_ack, tp->snd_max))) {
13077 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
13078 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13081 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
13082 TCP_PROBE5(connect__refused, NULL, tp,
13083 mtod(m, const char *), tp, th);
13084 tp = tcp_drop(tp, ECONNREFUSED);
13085 ctf_do_drop(m, tp);
13088 if (thflags & TH_RST) {
13089 ctf_do_drop(m, tp);
13092 if (!(thflags & TH_SYN)) {
13093 ctf_do_drop(m, tp);
13096 tp->irs = th->th_seq;
13097 tcp_rcvseqinit(tp);
13098 rack = (struct tcp_rack *)tp->t_fb_ptr;
13099 if (thflags & TH_ACK) {
13100 int tfo_partial = 0;
13102 KMOD_TCPSTAT_INC(tcps_connects);
13105 mac_socketpeer_set_from_mbuf(m, so);
13107 /* Do window scaling on this connection? */
13108 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
13109 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
13110 tp->rcv_scale = tp->request_r_scale;
13112 tp->rcv_adv += min(tp->rcv_wnd,
13113 TCP_MAXWIN << tp->rcv_scale);
13115 * If not all the data that was sent in the TFO SYN
13116 * has been acked, resend the remainder right away.
13118 if (IS_FASTOPEN(tp->t_flags) &&
13119 (tp->snd_una != tp->snd_max)) {
13120 tp->snd_nxt = th->th_ack;
13124 * If there's data, delay ACK; if there's also a FIN ACKNOW
13125 * will be turned on later.
13127 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) {
13128 rack_timer_cancel(tp, rack,
13129 rack->r_ctl.rc_rcvtime, __LINE__);
13130 tp->t_flags |= TF_DELACK;
13132 rack->r_wanted_output = 1;
13133 tp->t_flags |= TF_ACKNOW;
13136 tcp_ecn_input_syn_sent(tp, thflags, iptos);
13138 if (SEQ_GT(th->th_ack, tp->snd_una)) {
13140 * We advance snd_una for the
13141 * fast open case. If th_ack is
13142 * acknowledging data beyond
13143 * snd_una we can't just call
13144 * ack-processing since the
13145 * data stream in our send-map
13146 * will start at snd_una + 1 (one
13147 * beyond the SYN). If it's just
13148 * equal we don't need to do that
13149 * and there is no send_map.
13154 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions:
13155 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1
13157 tp->t_starttime = ticks;
13158 if (tp->t_flags & TF_NEEDFIN) {
13159 tcp_state_change(tp, TCPS_FIN_WAIT_1);
13160 tp->t_flags &= ~TF_NEEDFIN;
13161 thflags &= ~TH_SYN;
13163 tcp_state_change(tp, TCPS_ESTABLISHED);
13164 TCP_PROBE5(connect__established, NULL, tp,
13165 mtod(m, const char *), tp, th);
13166 rack_cc_conn_init(tp);
13170 * Received initial SYN in SYN-SENT[*] state => simultaneous
13171 * open. If segment contains CC option and there is a
13172 * cached CC, apply TAO test. If it succeeds, connection is *
13173 * half-synchronized. Otherwise, do 3-way handshake:
13174 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If
13175 * there was no CC option, clear cached CC value.
13177 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN);
13178 tcp_state_change(tp, TCPS_SYN_RECEIVED);
13181 * Advance th->th_seq to correspond to first data byte. If data,
13182 * trim to stay within window, dropping FIN if necessary.
13185 if (tlen > tp->rcv_wnd) {
13186 todrop = tlen - tp->rcv_wnd;
13188 tlen = tp->rcv_wnd;
13189 thflags &= ~TH_FIN;
13190 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin);
13191 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
13193 tp->snd_wl1 = th->th_seq - 1;
13194 tp->rcv_up = th->th_seq;
13196 * Client side of transaction: already sent SYN and data. If the
13197 * remote host used T/TCP to validate the SYN, our data will be
13198 * ACK'd; if so, enter normal data segment processing in the middle
13199 * of step 5, ack processing. Otherwise, goto step 6.
13201 if (thflags & TH_ACK) {
13202 /* For syn-sent we need to possibly update the rtt */
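/*
 * The echoed timestamp (to_tsecr) is in millisecond ticks, so the
 * elapsed time is scaled by HPTS_USEC_IN_MSEC to feed rack's
 * microsecond RTT filter below.
 */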
13203 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
13206 mcts = tcp_ts_getticks();
13207 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
13208 if (!tp->t_rttlow || tp->t_rttlow > t)
13210 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4);
13211 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2);
13212 tcp_rack_xmit_timer_commit(rack, tp);
13214 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val))
13216 /* We may have changed to FIN_WAIT_1 above */
13217 if (tp->t_state == TCPS_FIN_WAIT_1) {
13219 * In FIN_WAIT_1 STATE in addition to the processing
13220 * for the ESTABLISHED state if our FIN is now
13221 * acknowledged then enter FIN_WAIT_2.
13223 if (ourfinisacked) {
13225 * If we can't receive any more data, then
13226 * closing user can proceed. Starting the
13227 * timer is contrary to the specification,
13228 * but if we don't get a FIN we'll hang
13231 * XXXjl: we should release the tp also, and
13232 * use a compressed state.
13234 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
13235 soisdisconnected(so);
13236 tcp_timer_activate(tp, TT_2MSL,
13237 (tcp_fast_finwait2_recycle ?
13238 tcp_finwait2_timeout :
13241 tcp_state_change(tp, TCPS_FIN_WAIT_2);
13245 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13246 tiwin, thflags, nxt_pkt));
13250 * Return value of 1, the TCB is unlocked and most
13251 * likely gone, return value of 0, the TCP is still
13255 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
13256 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
13257 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
13259 struct tcp_rack *rack;
13260 int32_t ret_val = 0;
13261 int32_t ourfinisacked = 0;
13263 ctf_calc_rwin(so, tp);
13264 if ((thflags & TH_ACK) &&
13265 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
13266 SEQ_GT(th->th_ack, tp->snd_max))) {
13267 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
13268 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13271 rack = (struct tcp_rack *)tp->t_fb_ptr;
13272 if (IS_FASTOPEN(tp->t_flags)) {
13274 * When a TFO connection is in SYN_RECEIVED, the
13275 * only valid packets are the initial SYN, a
13276 * retransmit/copy of the initial SYN (possibly with
13277 * a subset of the original data), a valid ACK, a
13280 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
13281 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
13282 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13284 } else if (thflags & TH_SYN) {
13285 /* non-initial SYN is ignored */
13286 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) ||
13287 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) ||
13288 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) {
13289 ctf_do_drop(m, NULL);
13292 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) {
13293 ctf_do_drop(m, NULL);
13298 if ((thflags & TH_RST) ||
13299 (tp->t_fin_is_rst && (thflags & TH_FIN)))
13300 return (__ctf_process_rst(m, th, so, tp,
13301 &rack->r_ctl.challenge_ack_ts,
13302 &rack->r_ctl.challenge_ack_cnt));
13304 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
13305 * it's less than ts_recent, drop it.
13307 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
13308 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
13309 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
13313 * In the SYN-RECEIVED state, validate that the packet belongs to
13314 * this connection before trimming the data to fit the receive
13315 * window. Check the sequence number versus IRS since we know the
13316 * sequence numbers haven't wrapped. This is a partial fix for the
13317 * "LAND" DoS attack.
13319 if (SEQ_LT(th->th_seq, tp->irs)) {
13320 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
13321 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13324 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
13325 &rack->r_ctl.challenge_ack_ts,
13326 &rack->r_ctl.challenge_ack_cnt)) {
13330 * If last ACK falls within this segment's sequence numbers, record
13331 * its timestamp. NOTE: 1) That the test incorporates suggestions
13332 * from the latest proposal of the tcplw@cray.com list (Braden
13333 * 1993/04/26). 2) That updating only on newer timestamps interferes
13334 * with our earlier PAWS tests, so this check should be solely
13335 * predicated on the sequence space of this segment. 3) That we
13336 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
13337 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
13338 * SEG.Len, This modified check allows us to overcome RFC1323's
13339 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
13340 * p.869. In such cases, we can still calculate the RTT correctly
13341 * when RCV.NXT == Last.ACK.Sent.
13343 if ((to->to_flags & TOF_TS) != 0 &&
13344 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
13345 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
13346 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
13347 tp->ts_recent_age = tcp_ts_getticks();
13348 tp->ts_recent = to->to_tsval;
13350 tp->snd_wnd = tiwin;
13351 rack_validate_fo_sendwin_up(tp, rack);
13353 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
13354 * is on (half-synchronized state), then queue data for later
13355 * processing; else drop segment and return.
13357 if ((thflags & TH_ACK) == 0) {
13358 if (IS_FASTOPEN(tp->t_flags)) {
13359 rack_cc_conn_init(tp);
13361 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13362 tiwin, thflags, nxt_pkt));
13364 KMOD_TCPSTAT_INC(tcps_connects);
13365 if (tp->t_flags & TF_SONOTCONN) {
13366 tp->t_flags &= ~TF_SONOTCONN;
13369 /* Do window scaling? */
13370 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
13371 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
13372 tp->rcv_scale = tp->request_r_scale;
13375 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* ->
13378 tp->t_starttime = ticks;
13379 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) {
13380 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
13381 tp->t_tfo_pending = NULL;
13383 if (tp->t_flags & TF_NEEDFIN) {
13384 tcp_state_change(tp, TCPS_FIN_WAIT_1);
13385 tp->t_flags &= ~TF_NEEDFIN;
13387 tcp_state_change(tp, TCPS_ESTABLISHED);
13388 TCP_PROBE5(accept__established, NULL, tp,
13389 mtod(m, const char *), tp, th);
13391 * TFO connections call cc_conn_init() during SYN
13392 * processing. Calling it again here for such connections
13393 * is not harmless as it would undo the snd_cwnd reduction
13394 * that occurs when a TFO SYN|ACK is retransmitted.
13396 if (!IS_FASTOPEN(tp->t_flags))
13397 rack_cc_conn_init(tp);
13400 * Account for the ACK of our SYN prior to
13401 * regular ACK processing below, except for
13402 * simultaneous SYN, which is handled later.
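/*
 * (Recall that the SYN itself occupies one sequence number, which is
 * why an ACK of our SYN needs this separate accounting before the
 * regular ACK processing of the data bytes.)
 */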
13404 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN))
13407 * If segment contains data or ACK, will call tcp_reass() later; if
13408 * not, do so now to pass queued data to user.
13410 if (tlen == 0 && (thflags & TH_FIN) == 0) {
13411 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0,
13413 if (tp->t_flags & TF_WAKESOR) {
13414 tp->t_flags &= ~TF_WAKESOR;
13415 /* NB: sorwakeup_locked() does an implicit unlock. */
13416 sorwakeup_locked(so);
13419 tp->snd_wl1 = th->th_seq - 1;
13420 /* For syn-recv we need to possibly update the rtt */
13421 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
13424 mcts = tcp_ts_getticks();
13425 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
13426 if (!tp->t_rttlow || tp->t_rttlow > t)
13428 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5);
13429 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2);
13430 tcp_rack_xmit_timer_commit(rack, tp);
13432 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
13435 if (tp->t_state == TCPS_FIN_WAIT_1) {
13436 /* We could have gone to FIN_WAIT_1 (or EST) above */
13438 * In FIN_WAIT_1 STATE in addition to the processing for the
13439 * ESTABLISHED state if our FIN is now acknowledged then
13440 * enter FIN_WAIT_2.
13442 if (ourfinisacked) {
13444 * If we can't receive any more data, then closing
13445 * user can proceed. Starting the timer is contrary
13446 * to the specification, but if we don't get a FIN
13447 * we'll hang forever.
13449 * XXXjl: we should release the tp also, and use a
13450 * compressed state.
13452 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
13453 soisdisconnected(so);
13454 tcp_timer_activate(tp, TT_2MSL,
13455 (tcp_fast_finwait2_recycle ?
13456 tcp_finwait2_timeout :
13459 tcp_state_change(tp, TCPS_FIN_WAIT_2);
13462 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13463 tiwin, thflags, nxt_pkt));
13467 * Return value of 1, the TCB is unlocked and most
13468 * likely gone, return value of 0, the TCP is still
13472 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
13473 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
13474 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
13476 int32_t ret_val = 0;
13477 struct tcp_rack *rack;
13480 * Header prediction: check for the two common cases of a
13481 * uni-directional data xfer. If the packet has no control flags,
13482 * is in-sequence, the window didn't change and we're not
13483 * retransmitting, it's a candidate. If the length is zero and the
13484 * ack moved forward, we're the sender side of the xfer. Just free
13485 * the data acked & wake any higher level process that was blocked
13486 * waiting for space. If the length is non-zero and the ack didn't
13487 * move, we're the receiver side. If we're getting packets in-order
13488 * (the reassembly queue is empty), add the data to the socket
13489 * buffer and note that we need a delayed ack. Make sure that the
13490 * hidden state-flags are also off. Since we check for
13491 * TCPS_ESTABLISHED first, it can only be TF_NEEDSYN.
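/*
 * In code terms, the candidate test below is: no SACK option present,
 * flags are exactly ACK, the reassembly queue is empty, and th_seq ==
 * rcv_nxt. Pure-ACK candidates are handed to rack_fastack() and
 * in-order data candidates to rack_do_fastnewdata(); either one
 * returning non-zero means the segment was consumed on the fast path.
 */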
13493 rack = (struct tcp_rack *)tp->t_fb_ptr;
13494 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) &&
13495 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) &&
13496 __predict_true(SEGQ_EMPTY(tp)) &&
13497 __predict_true(th->th_seq == tp->rcv_nxt)) {
13499 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen,
13500 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) {
13504 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen,
13505 tiwin, nxt_pkt, iptos)) {
13510 ctf_calc_rwin(so, tp);
13512 if ((thflags & TH_RST) ||
13513 (tp->t_fin_is_rst && (thflags & TH_FIN)))
13514 return (__ctf_process_rst(m, th, so, tp,
13515 &rack->r_ctl.challenge_ack_ts,
13516 &rack->r_ctl.challenge_ack_cnt));
13519 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
13520 * synchronized state.
13522 if (thflags & TH_SYN) {
13523 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
13527 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
13528 * it's less than ts_recent, drop it.
13530 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
13531 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
13532 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
13535 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
13536 &rack->r_ctl.challenge_ack_ts,
13537 &rack->r_ctl.challenge_ack_cnt)) {
13541 * If last ACK falls within this segment's sequence numbers, record
13542 * its timestamp. NOTE: 1) That the test incorporates suggestions
13543 * from the latest proposal of the tcplw@cray.com list (Braden
13544 * 1993/04/26). 2) That updating only on newer timestamps interferes
13545 * with our earlier PAWS tests, so this check should be solely
13546 * predicated on the sequence space of this segment. 3) That we
13547 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
13548 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
13549 * SEG.Len, This modified check allows us to overcome RFC1323's
13550 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
13551 * p.869. In such cases, we can still calculate the RTT correctly
13552 * when RCV.NXT == Last.ACK.Sent.
13554 if ((to->to_flags & TOF_TS) != 0 &&
13555 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
13556 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
13557 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
13558 tp->ts_recent_age = tcp_ts_getticks();
13559 tp->ts_recent = to->to_tsval;
13562 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
13563 * is on (half-synchronized state), then queue data for later
13564 * processing; else drop segment and return.
13566 if ((thflags & TH_ACK) == 0) {
13567 if (tp->t_flags & TF_NEEDSYN) {
13568 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13569 tiwin, thflags, nxt_pkt));
13571 } else if (tp->t_flags & TF_ACKNOW) {
13572 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
13573 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
13576 ctf_do_drop(m, NULL);
13583 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
13586 if (sbavail(&so->so_snd)) {
13587 if (ctf_progress_timeout_check(tp, true)) {
13588 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
13589 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13593 /* State changes only happen in rack_process_data() */
13594 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13595 tiwin, thflags, nxt_pkt));
13599 * Return value of 1, the TCB is unlocked and most
13600 * likely gone, return value of 0, the TCP is still
13604 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
13605 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
13606 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
13608 int32_t ret_val = 0;
13609 struct tcp_rack *rack;
13611 rack = (struct tcp_rack *)tp->t_fb_ptr;
13612 ctf_calc_rwin(so, tp);
13613 if ((thflags & TH_RST) ||
13614 (tp->t_fin_is_rst && (thflags & TH_FIN)))
13615 return (__ctf_process_rst(m, th, so, tp,
13616 &rack->r_ctl.challenge_ack_ts,
13617 &rack->r_ctl.challenge_ack_cnt));
13619 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
13620 * synchronized state.
13622 if (thflags & TH_SYN) {
13623 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
13627 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
13628 * it's less than ts_recent, drop it.
13630 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
13631 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
13632 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
13635 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
13636 &rack->r_ctl.challenge_ack_ts,
13637 &rack->r_ctl.challenge_ack_cnt)) {
13641 * If last ACK falls within this segment's sequence numbers, record
13642 * its timestamp. NOTE: 1) That the test incorporates suggestions
13643 * from the latest proposal of the tcplw@cray.com list (Braden
13644 * 1993/04/26). 2) That updating only on newer timestamps interferes
13645 * with our earlier PAWS tests, so this check should be solely
13646 * predicated on the sequence space of this segment. 3) That we
13647 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
13648 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
13649 * SEG.Len, This modified check allows us to overcome RFC1323's
13650 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
13651 * p.869. In such cases, we can still calculate the RTT correctly
13652 * when RCV.NXT == Last.ACK.Sent.
13654 if ((to->to_flags & TOF_TS) != 0 &&
13655 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
13656 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
13657 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
13658 tp->ts_recent_age = tcp_ts_getticks();
13659 tp->ts_recent = to->to_tsval;
13662 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
13663 * is on (half-synchronized state), then queue data for later
13664 * processing; else drop segment and return.
13666 if ((thflags & TH_ACK) == 0) {
13667 if (tp->t_flags & TF_NEEDSYN) {
13668 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13669 tiwin, thflags, nxt_pkt));
13671 } else if (tp->t_flags & TF_ACKNOW) {
13672 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
13673 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
13676 ctf_do_drop(m, NULL);
13683 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
13686 if (sbavail(&so->so_snd)) {
13687 if (ctf_progress_timeout_check(tp, true)) {
13688 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
13689 tp, tick, PROGRESS_DROP, __LINE__);
13690 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13694 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13695 tiwin, thflags, nxt_pkt));
13699 rack_check_data_after_close(struct mbuf *m,
13700 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so)
13702 struct tcp_rack *rack;
13704 rack = (struct tcp_rack *)tp->t_fb_ptr;
13705 if (rack->rc_allow_data_af_clo == 0) {
13707 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
13708 /* tcp_close will kill the inp pre-log the Reset */
13709 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
13710 tp = tcp_close(tp);
13711 KMOD_TCPSTAT_INC(tcps_rcvafterclose);
13712 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen));
13715 if (sbavail(&so->so_snd) == 0)
13717 /* Ok we allow data that is ignored and a followup reset */
13718 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
13719 tp->rcv_nxt = th->th_seq + *tlen;
13720 tp->t_flags2 |= TF2_DROP_AF_DATA;
13721 rack->r_wanted_output = 1;
13727 * Return value of 1, the TCB is unlocked and most
13728 * likely gone, return value of 0, the TCP is still
13732 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
13733 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
13734 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
13736 int32_t ret_val = 0;
13737 int32_t ourfinisacked = 0;
13738 struct tcp_rack *rack;
13740 rack = (struct tcp_rack *)tp->t_fb_ptr;
13741 ctf_calc_rwin(so, tp);
13743 if ((thflags & TH_RST) ||
13744 (tp->t_fin_is_rst && (thflags & TH_FIN)))
13745 return (__ctf_process_rst(m, th, so, tp,
13746 &rack->r_ctl.challenge_ack_ts,
13747 &rack->r_ctl.challenge_ack_cnt));
13749 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
13750 * synchronized state.
13752 if (thflags & TH_SYN) {
13753 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
13757 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
13758 * it's less than ts_recent, drop it.
13760 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
13761 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
13762 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
13765 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
13766 &rack->r_ctl.challenge_ack_ts,
13767 &rack->r_ctl.challenge_ack_cnt)) {
13771 * If new data are received on a connection after the user processes
13772 * are gone, then RST the other end.
13774 if ((tp->t_flags & TF_CLOSED) && tlen &&
13775 rack_check_data_after_close(m, tp, &tlen, th, so))
13778 * If last ACK falls within this segment's sequence numbers, record
13779 * its timestamp. NOTE: 1) That the test incorporates suggestions
13780 * from the latest proposal of the tcplw@cray.com list (Braden
13781 * 1993/04/26). 2) That updating only on newer timestamps interferes
13782 * with our earlier PAWS tests, so this check should be solely
13783 * predicated on the sequence space of this segment. 3) That we
13784 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
13785 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
13786 * SEG.Len, This modified check allows us to overcome RFC1323's
13787 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
13788 * p.869. In such cases, we can still calculate the RTT correctly
13789 * when RCV.NXT == Last.ACK.Sent.
13791 if ((to->to_flags & TOF_TS) != 0 &&
13792 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
13793 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
13794 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
13795 tp->ts_recent_age = tcp_ts_getticks();
13796 tp->ts_recent = to->to_tsval;
13799 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
13800 * is on (half-synchronized state), then queue data for later
13801 * processing; else drop segment and return.
13803 if ((thflags & TH_ACK) == 0) {
13804 if (tp->t_flags & TF_NEEDSYN) {
13805 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13806 tiwin, thflags, nxt_pkt));
13807 } else if (tp->t_flags & TF_ACKNOW) {
13808 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
13809 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
13812 ctf_do_drop(m, NULL);
13819 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
13822 if (ourfinisacked) {
13824 * If we can't receive any more data, then closing user can
13825 * proceed. Starting the timer is contrary to the
13826 * specification, but if we don't get a FIN we'll hang
13829 * XXXjl: we should release the tp also, and use a
13830 * compressed state.
13832 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
13833 soisdisconnected(so);
13834 tcp_timer_activate(tp, TT_2MSL,
13835 (tcp_fast_finwait2_recycle ?
13836 tcp_finwait2_timeout :
13839 tcp_state_change(tp, TCPS_FIN_WAIT_2);
13841 if (sbavail(&so->so_snd)) {
13842 if (ctf_progress_timeout_check(tp, true)) {
13843 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
13844 tp, tick, PROGRESS_DROP, __LINE__);
13845 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13849 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13850 tiwin, thflags, nxt_pkt));
13854 * Return value of 1, the TCB is unlocked and most
13855 * likely gone, return value of 0, the TCP is still
13859 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
13860 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
13861 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
13863 int32_t ret_val = 0;
13864 int32_t ourfinisacked = 0;
13865 struct tcp_rack *rack;
13867 rack = (struct tcp_rack *)tp->t_fb_ptr;
13868 ctf_calc_rwin(so, tp);
13870 if ((thflags & TH_RST) ||
13871 (tp->t_fin_is_rst && (thflags & TH_FIN)))
13872 return (__ctf_process_rst(m, th, so, tp,
13873 &rack->r_ctl.challenge_ack_ts,
13874 &rack->r_ctl.challenge_ack_cnt));
13876 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
13877 * synchronized state.
13879 if (thflags & TH_SYN) {
13880 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
13884 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
13885 * it's less than ts_recent, drop it.
13887 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
13888 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
13889 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
13892 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
13893 &rack->r_ctl.challenge_ack_ts,
13894 &rack->r_ctl.challenge_ack_cnt)) {
13898 * If new data are received on a connection after the user processes
13899 * are gone, then RST the other end.
13901 if ((tp->t_flags & TF_CLOSED) && tlen &&
13902 rack_check_data_after_close(m, tp, &tlen, th, so))
13905 * If last ACK falls within this segment's sequence numbers, record
13906 * its timestamp. NOTE: 1) That the test incorporates suggestions
13907 * from the latest proposal of the tcplw@cray.com list (Braden
13908 * 1993/04/26). 2) That updating only on newer timestamps interferes
13909 * with our earlier PAWS tests, so this check should be solely
13910 * predicated on the sequence space of this segment. 3) That we
13911 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
13912 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
13913 * SEG.Len, This modified check allows us to overcome RFC1323's
13914 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
13915 * p.869. In such cases, we can still calculate the RTT correctly
13916 * when RCV.NXT == Last.ACK.Sent.
13918 if ((to->to_flags & TOF_TS) != 0 &&
13919 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
13920 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
13921 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
13922 tp->ts_recent_age = tcp_ts_getticks();
13923 tp->ts_recent = to->to_tsval;
13926 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
13927 * is on (half-synchronized state), then queue data for later
13928 * processing; else drop segment and return.
13930 if ((thflags & TH_ACK) == 0) {
13931 if (tp->t_flags & TF_NEEDSYN) {
13932 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13933 tiwin, thflags, nxt_pkt));
13934 } else if (tp->t_flags & TF_ACKNOW) {
13935 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
13936 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
13939 ctf_do_drop(m, NULL);
13946 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
13949 if (ourfinisacked) {
13954 if (sbavail(&so->so_snd)) {
13955 if (ctf_progress_timeout_check(tp, true)) {
13956 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
13957 tp, tick, PROGRESS_DROP, __LINE__);
13958 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13962 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13963 tiwin, thflags, nxt_pkt));
13967 * Return value of 1, the TCB is unlocked and most
13968 * likely gone, return value of 0, the TCP is still
13972 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
13973 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
13974 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
13976 int32_t ret_val = 0;
13977 int32_t ourfinisacked = 0;
13978 struct tcp_rack *rack;
13980 rack = (struct tcp_rack *)tp->t_fb_ptr;
13981 ctf_calc_rwin(so, tp);
13983 if ((thflags & TH_RST) ||
13984 (tp->t_fin_is_rst && (thflags & TH_FIN)))
13985 return (__ctf_process_rst(m, th, so, tp,
13986 &rack->r_ctl.challenge_ack_ts,
13987 &rack->r_ctl.challenge_ack_cnt));
13989 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
13990 * synchronized state.
13992 if (thflags & TH_SYN) {
13993 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
13997 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
13998 * it's less than ts_recent, drop it.
14000 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
14001 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
14002 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
14005 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
14006 &rack->r_ctl.challenge_ack_ts,
14007 &rack->r_ctl.challenge_ack_cnt)) {
14011 * If new data are received on a connection after the user processes
14012 * are gone, then RST the other end.
14014 if ((tp->t_flags & TF_CLOSED) && tlen &&
14015 rack_check_data_after_close(m, tp, &tlen, th, so))
14018 * If last ACK falls within this segment's sequence numbers, record
14019 * its timestamp. NOTE: 1) That the test incorporates suggestions
14020 * from the latest proposal of the tcplw@cray.com list (Braden
14021 * 1993/04/26). 2) That updating only on newer timestamps interferes
14022 * with our earlier PAWS tests, so this check should be solely
14023 * predicated on the sequence space of this segment. 3) That we
14024 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
14025 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
14026 * SEG.Len, This modified check allows us to overcome RFC1323's
14027 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
14028 * p.869. In such cases, we can still calculate the RTT correctly
14029 * when RCV.NXT == Last.ACK.Sent.
14031 if ((to->to_flags & TOF_TS) != 0 &&
14032 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
14033 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
14034 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
14035 tp->ts_recent_age = tcp_ts_getticks();
14036 tp->ts_recent = to->to_tsval;
14039 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
14040 * is on (half-synchronized state), then queue data for later
14041 * processing; else drop segment and return.
14043 if ((thflags & TH_ACK) == 0) {
14044 if (tp->t_flags & TF_NEEDSYN) {
14045 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
14046 tiwin, thflags, nxt_pkt));
14047 } else if (tp->t_flags & TF_ACKNOW) {
14048 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
14049 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
14052 ctf_do_drop(m, NULL);
14057 * case TCPS_LAST_ACK: Ack processing.
14059 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
14062 if (ourfinisacked) {
14063 tp = tcp_close(tp);
14064 ctf_do_drop(m, tp);
14067 if (sbavail(&so->so_snd)) {
14068 if (ctf_progress_timeout_check(tp, true)) {
14069 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
14070 tp, tick, PROGRESS_DROP, __LINE__);
14071 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
14075 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
14076 tiwin, thflags, nxt_pkt));
14080 * Return value of 1, the TCB is unlocked and most
14081 * likely gone, return value of 0, the TCP is still
14085 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
14086 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
14087 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
14089 int32_t ret_val = 0;
14090 int32_t ourfinisacked = 0;
14091 struct tcp_rack *rack;
14093 rack = (struct tcp_rack *)tp->t_fb_ptr;
14094 ctf_calc_rwin(so, tp);
14096 /* Reset receive buffer auto scaling when not in bulk receive mode. */
14097 if ((thflags & TH_RST) ||
14098 (tp->t_fin_is_rst && (thflags & TH_FIN)))
14099 return (__ctf_process_rst(m, th, so, tp,
14100 &rack->r_ctl.challenge_ack_ts,
14101 &rack->r_ctl.challenge_ack_cnt));
14103 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
14104 * synchronized state.
14106 if (thflags & TH_SYN) {
14107 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
14111 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
14112 * it's less than ts_recent, drop it.
14114 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
14115 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
14116 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
14119 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
14120 &rack->r_ctl.challenge_ack_ts,
14121 &rack->r_ctl.challenge_ack_cnt)) {
14125 * If new data are received on a connection after the user processes
14126 * are gone, then RST the other end.
14128 if ((tp->t_flags & TF_CLOSED) && tlen &&
14129 rack_check_data_after_close(m, tp, &tlen, th, so))
14132 * If last ACK falls within this segment's sequence numbers, record
14133 * its timestamp. NOTE: 1) That the test incorporates suggestions
14134 * from the latest proposal of the tcplw@cray.com list (Braden
14135 * 1993/04/26). 2) That updating only on newer timestamps interferes
14136 * with our earlier PAWS tests, so this check should be solely
14137 * predicated on the sequence space of this segment. 3) That we
14138 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
14139 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
14140 * SEG.Len, This modified check allows us to overcome RFC1323's
14141 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
14142 * p.869. In such cases, we can still calculate the RTT correctly
14143 * when RCV.NXT == Last.ACK.Sent.
14145 if ((to->to_flags & TOF_TS) != 0 &&
14146 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
14147 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
14148 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
14149 tp->ts_recent_age = tcp_ts_getticks();
14150 tp->ts_recent = to->to_tsval;
14153 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
14154 * is on (half-synchronized state), then queue data for later
14155 * processing; else drop segment and return.
14157 if ((thflags & TH_ACK) == 0) {
14158 if (tp->t_flags & TF_NEEDSYN) {
14159 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
14160 tiwin, thflags, nxt_pkt));
14161 } else if (tp->t_flags & TF_ACKNOW) {
14162 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
14163 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
14166 ctf_do_drop(m, NULL);
14173 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
14176 if (sbavail(&so->so_snd)) {
14177 if (ctf_progress_timeout_check(tp, true)) {
14178 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
14179 tp, tick, PROGRESS_DROP, __LINE__);
14180 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
14184 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
14185 tiwin, thflags, nxt_pkt));
14189 rack_clear_rate_sample(struct tcp_rack *rack)
14191 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY;
14192 rack->r_ctl.rack_rs.rs_rtt_cnt = 0;
14193 rack->r_ctl.rack_rs.rs_rtt_tot = 0;
14197 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override)
14199 uint64_t bw_est, rate_wanted;
14201 uint32_t user_max, orig_min, orig_max;
14203 #ifdef TCP_REQUEST_TRK
14204 if (rack->rc_hybrid_mode &&
14205 (rack->r_ctl.rc_pace_max_segs != 0) &&
14206 (rack_hybrid_allow_set_maxseg == 1) &&
14207 (rack->r_ctl.rc_last_sft != NULL)) {
14208 rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS;
14212 orig_min = rack->r_ctl.rc_pace_min_segs;
14213 orig_max = rack->r_ctl.rc_pace_max_segs;
14214 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs;
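/*
 * Three sizing regimes follow: a forced or fixed-rate setting uses
 * the user's configured max (or, when pacing right at a hardware
 * rate, a burst size computed from that rate); an always-pace
 * connection uses its goodput/initial-rate estimate to pick a pacing
 * length, falling back to the minimum segment size without one; and a
 * connection with no estimate and no size yet is limited to
 * initial-window sized bursts.
 */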
14215 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs)
14217 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp);
14218 if (rack->use_fixed_rate || rack->rc_force_max_seg) {
14219 if (user_max != rack->r_ctl.rc_pace_max_segs)
14222 if (rack->rc_force_max_seg) {
14223 rack->r_ctl.rc_pace_max_segs = user_max;
14224 } else if (rack->use_fixed_rate) {
14225 bw_est = rack_get_bw(rack);
14226 if ((rack->r_ctl.crte == NULL) ||
14227 (bw_est != rack->r_ctl.crte->rate)) {
14228 rack->r_ctl.rc_pace_max_segs = user_max;
14230 /* We are pacing right at the hardware rate */
14231 uint32_t segsiz, pace_one;
14233 if (rack_pace_one_seg ||
14234 (rack->r_ctl.rc_user_set_min_segs == 1))
14238 segsiz = min(ctf_fixed_maxseg(tp),
14239 rack->r_ctl.rc_pace_min_segs);
14240 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(
14241 tp, bw_est, segsiz, pace_one,
14242 rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor);
14244 } else if (rack->rc_always_pace) {
14245 if (rack->r_ctl.gp_bw ||
14246 rack->r_ctl.init_rate) {
14247 /* We have a rate of some sort set */
14250 bw_est = rack_get_bw(rack);
14251 orig = rack->r_ctl.rc_pace_max_segs;
14253 rate_wanted = *fill_override;
14255 rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL);
14257 /* We have something */
14258 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack,
14260 ctf_fixed_maxseg(rack->rc_tp));
14262 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs;
14263 if (orig != rack->r_ctl.rc_pace_max_segs)
14265 } else if ((rack->r_ctl.gp_bw == 0) &&
14266 (rack->r_ctl.rc_pace_max_segs == 0)) {
14268 * If we have nothing limit us to bursting
14269 * out IW sized pieces.
14272 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack);
14275 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) {
14277 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES;
14280 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2);
14285 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack, int32_t flags)
14288 struct ip6_hdr *ip6 = NULL;
14291 struct ip *ip = NULL;
14293 struct udphdr *udp = NULL;
14295 /* Ok, let's fill in the fast block; it can only be used with no IP options! */
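/*
 * The template built below is laid out as
 * [ip6 or ip header][udp header, only when tunneling][tcp header];
 * fsb.th and fsb.udp point into this buffer so that the fast output
 * path can reuse a prebuilt header rather than reconstructing one for
 * every packet.
 */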
14297 if (rack->r_is_v6) {
14298 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
14299 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
14301 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
14302 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
14303 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
14304 udp->uh_dport = tp->t_port;
14305 rack->r_ctl.fsb.udp = udp;
14306 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
14309 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1);
14310 rack->r_ctl.fsb.udp = NULL;
14312 tcpip_fillheaders(rack->rc_inp,
14314 ip6, rack->r_ctl.fsb.th);
14315 rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL);
14320 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr);
14321 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
14323 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
14324 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
14325 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
14326 udp->uh_dport = tp->t_port;
14327 rack->r_ctl.fsb.udp = udp;
14328 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
14331 rack->r_ctl.fsb.udp = NULL;
14332 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1);
14334 tcpip_fillheaders(rack->rc_inp,
14336 ip, rack->r_ctl.fsb.th);
14337 rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl;
14340 rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0),
14341 (long)TCP_MAXWIN << tp->rcv_scale);
14342 rack->r_fsb_inited = 1;
14346 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack)
14349 * Allocate the larger of the two spaces: V6 if available, else
14350 * just V4, and include room for a udphdr (overbook).
14353 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr);
14355 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr);
14357 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len,
14358 M_TCPFSB, M_NOWAIT|M_ZERO);
14359 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) {
14362 rack->r_fsb_inited = 0;
14367 rack_log_hystart_event(struct tcp_rack *rack, uint32_t high_seq, uint8_t mod)
14370 * Types of logs (mod value)
14371 * 20 - Initial round setup
14372 * 21 - Rack declares a new round.
14377 if (tcp_bblogging_on(tp)) {
14378 union tcp_log_stackspecific log;
14381 memset(&log, 0, sizeof(log));
14382 log.u_bbr.flex1 = rack->r_ctl.current_round;
14383 log.u_bbr.flex2 = rack->r_ctl.roundends;
14384 log.u_bbr.flex3 = high_seq;
14385 log.u_bbr.flex4 = tp->snd_max;
14386 log.u_bbr.flex8 = mod;
14387 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
14388 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes;
14389 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes;
14390 TCP_LOG_EVENTP(tp, NULL,
14391 &tptosocket(tp)->so_rcv,
14392 &tptosocket(tp)->so_snd,
14394 0, &log, false, &tv);
14399 rack_deferred_init(struct tcpcb *tp, struct tcp_rack *rack)
14401 rack->rack_deferred_inited = 1;
14402 rack->r_ctl.roundends = tp->snd_max;
14403 rack->r_ctl.rc_high_rwnd = tp->snd_wnd;
14404 rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
14408 rack_init_retransmit_value(struct tcp_rack *rack, int ctl)
14410 /* Retransmit bit controls.
14412 * The setting of these values selects one of
14413 * three modes that dictate
14414 * how rack does retransmissions. Note this
14415 * applies in *any* mode, i.e. pacing on or off, DGP,
14416 * fixed rate pacing, or just bursting rack.
14418 * 1 - Use full sized retransmits i.e. limit
14419 * the size to whatever the pace_max_segments
14422 * 2 - Use pacer min granularity as a guide to
14423 * the size combined with the current calculated
14424 * goodput b/w measurement. So for example if
14425 * the goodput is measured at 20Mbps we would
14426 * calculate 8125 (pacer minimum 250usec in
14427 * that b/w) and then round it up to the next
14428 * MSS, i.e. for a 1448-byte MSS that is 6 MSS or 8688 bytes.
14430 * 0 - The rack default 1 MSS (anything not 0/1/2
14431 * fall here too if we are setting via rack_init()).
14435 rack->full_size_rxt = 1;
14436 rack->shape_rxt_to_pacing_min = 0;
14437 } else if (ctl == 2) {
14438 rack->full_size_rxt = 0;
14439 rack->shape_rxt_to_pacing_min = 1;
14441 rack->full_size_rxt = 0;
14442 rack->shape_rxt_to_pacing_min = 0;
14447 rack_log_chg_info(struct tcpcb *tp, struct tcp_rack *rack, uint8_t mod,
14452 if (tcp_bblogging_on(rack->rc_tp)) {
14453 union tcp_log_stackspecific log;
14456 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
14457 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
14458 log.u_bbr.flex8 = mod;
14459 log.u_bbr.flex1 = flex1;
14460 log.u_bbr.flex2 = flex2;
14461 log.u_bbr.flex3 = flex3;
14462 tcp_log_event(tp, NULL, NULL, NULL, TCP_CHG_QUERY, 0,
14463 0, &log, false, NULL, __func__, __LINE__, &tv);
14468 rack_chg_query(struct tcpcb *tp, struct tcp_query_resp *reqr)
14470 struct tcp_rack *rack;
14471 struct rack_sendmap *rsm;
14475 rack = (struct tcp_rack *)tp->t_fb_ptr;
14476 switch (reqr->req) {
14477 case TCP_QUERY_SENDMAP:
14478 if ((reqr->req_param == tp->snd_max) ||
14479 (tp->snd_max == tp->snd_una)){
14483 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param);
14485 /* Can't find that seq -- unlikely */
14488 reqr->sendmap_start = rsm->r_start;
14489 reqr->sendmap_end = rsm->r_end;
14490 reqr->sendmap_send_cnt = rsm->r_rtr_cnt;
14491 reqr->sendmap_fas = rsm->r_fas;
14492 if (reqr->sendmap_send_cnt > SNDMAP_NRTX)
14493 reqr->sendmap_send_cnt = SNDMAP_NRTX;
14494 for(i=0; i<reqr->sendmap_send_cnt; i++)
14495 reqr->sendmap_time[i] = rsm->r_tim_lastsent[i];
14496 reqr->sendmap_ack_arrival = rsm->r_ack_arrival;
14497 reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK;
14498 reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes;
14499 reqr->sendmap_dupacks = rsm->r_dupack;
14500 rack_log_chg_info(tp, rack, 1,
14506 case TCP_QUERY_TIMERS_UP:
14507 if (rack->r_ctl.rc_hpts_flags == 0) {
14511 reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags;
14512 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
14513 reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to;
14515 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
14516 reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp;
14518 rack_log_chg_info(tp, rack, 2,
14519 rack->r_ctl.rc_hpts_flags,
14520 rack->r_ctl.rc_last_output_to,
14521 rack->r_ctl.rc_timer_exp);
14524 case TCP_QUERY_RACK_TIMES:
14525 /* Reordering items */
14526 reqr->rack_num_dsacks = rack->r_ctl.num_dsack;
14527 reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts;
14528 /* Timestamps and timers */
14529 reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time;
14530 reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt;
14531 reqr->rack_rtt = rack->rc_rack_rtt;
14532 reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time;
14533 reqr->rack_srtt_measured = rack->rc_srtt_measure_made;
14535 reqr->rack_sacked = rack->r_ctl.rc_sacked;
14536 reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt;
14537 reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered;
14538 reqr->rack_prr_recovery_fs = rack->r_ctl.rc_prr_recovery_fs;
14539 reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt;
14540 reqr->rack_prr_out = rack->r_ctl.rc_prr_out;
14541 /* TLP and persists info */
14542 reqr->rack_tlp_out = rack->rc_tlp_in_progress;
14543 reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out;
14544 if (rack->rc_in_persist) {
14545 reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time;
14546 reqr->rack_in_persist = 1;
14548 reqr->rack_time_went_idle = 0;
14549 reqr->rack_in_persist = 0;
14551 if (rack->r_wanted_output)
14552 reqr->rack_wanted_output = 1;
14554 reqr->rack_wanted_output = 0;
14563 rack_switch_failed(struct tcpcb *tp)
14566 * This method gets called if a stack switch was
14567 * attempted and it failed. We are left in place,
14568 * but our hpts timers were stopped and we
14569 * need to validate time units and inp_flags2.
14571 struct inpcb *inp = tptoinpcb(tp);
14572 struct tcp_rack *rack;
14576 struct hpts_diag diag;
14578 rack = (struct tcp_rack *)tp->t_fb_ptr;
14579 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC);
14580 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
14581 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
14583 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
14584 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
14585 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
14586 if (inp->inp_in_hpts) {
14590 cts = tcp_get_usecs(&tv);
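/*
 * Re-arm hpts: use whatever remains of the pacing or timer deadline,
 * falling back to a single slot if it has already passed.
 */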
14591 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
14592 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) {
14593 toval = rack->r_ctl.rc_last_output_to - cts;
14595 /* one slot please */
14596 toval = HPTS_TICKS_PER_SLOT;
14598 } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
14599 if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
14600 toval = rack->r_ctl.rc_timer_exp - cts;
14602 /* one slot please */
14603 toval = HPTS_TICKS_PER_SLOT;
14606 toval = HPTS_TICKS_PER_SLOT;
14607 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(toval),
14609 rack_log_hpts_diag(rack, cts, &diag, &tv);
14613 rack_init_outstanding(struct tcpcb *tp, struct tcp_rack *rack, uint32_t us_cts, void *ptr)
14615 struct rack_sendmap *rsm, *ersm;
14616 int insret __diagused;
14618 * When initing outstanding, we must be quite careful
14619 * to not refer to tp->t_fb_ptr. This has the old rack
14620 * pointer in it, not the "new" one (when we are doing
14625 if (tp->t_fb->tfb_chg_query == NULL) {
14626 /* Create a send map for the current outstanding data */
14628 rsm = rack_alloc(rack);
14630 uma_zfree(rack_pcb_zone, ptr);
14633 rsm->r_no_rtt_allowed = 1;
14634 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
14635 rsm->r_rtr_cnt = 1;
14636 rsm->r_rtr_bytes = 0;
14637 if (tp->t_flags & TF_SENTFIN)
14638 rsm->r_flags |= RACK_HAS_FIN;
14639 rsm->r_end = tp->snd_max;
14640 if (tp->snd_una == tp->iss) {
14641 /* The data space is one beyond snd_una */
14642 rsm->r_flags |= RACK_HAS_SYN;
14643 rsm->r_start = tp->iss;
14644 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una);
14646 rsm->r_start = tp->snd_una;
14648 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) {
14649 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff);
14651 rsm->orig_m_len = rsm->m->m_len;
14652 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
14654 rsm->orig_m_len = 0;
14655 rsm->orig_t_space = 0;
14659 * This can happen if we have a stand-alone FIN or
14663 rsm->orig_m_len = 0;
14664 rsm->orig_t_space = 0;
14668 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) {
14669 panic("Insert in rb tree fails ret:%d rack:%p rsm:%p",
14670 insret, rack, rsm);
14673 (void)tqhash_insert(rack->r_ctl.tqh, rsm);
14675 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
14676 rsm->r_in_tmap = 1;
14678 /* We have a query mechanism, let's use it */
14679 struct tcp_query_resp qr;
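/*
 * Walk the old stack's sendmap via TCP_QUERY_SENDMAP, rebuilding a
 * rack_sendmap entry for each returned range until we reach snd_max.
 */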
14684 while (at != tp->snd_max) {
14685 memset(&qr, 0, sizeof(qr));
14686 qr.req = TCP_QUERY_SENDMAP;
14688 if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0)
14691 at = qr.sendmap_end;
14692 /* Now let's build the entry for this one */
14693 rsm = rack_alloc(rack);
14695 uma_zfree(rack_pcb_zone, ptr);
14698 memset(rsm, 0, sizeof(struct rack_sendmap));
14699 /* Now configure the rsm and insert it */
14700 rsm->r_dupack = qr.sendmap_dupacks;
14701 rsm->r_start = qr.sendmap_start;
14702 rsm->r_end = qr.sendmap_end;
14703 if (qr.sendmap_fas)
14704 rsm->r_fas = qr.sendmap_fas;
14706 rsm->r_fas = rsm->r_start - tp->snd_una;
14708 * We have carefully aligned the bits
14709 * so that all we have to do is copy over
14710 * the bits with the mask.
14712 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK;
14713 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes;
14714 rsm->r_rtr_cnt = qr.sendmap_send_cnt;
14715 rsm->r_ack_arrival = qr.sendmap_ack_arrival;
14716 for (i = 0; i < rsm->r_rtr_cnt; i++)
14717 rsm->r_tim_lastsent[i] = qr.sendmap_time[i];
14718 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
14719 (rsm->r_start - tp->snd_una), &rsm->soff);
14721 rsm->orig_m_len = rsm->m->m_len;
14722 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
14724 rsm->orig_m_len = 0;
14725 rsm->orig_t_space = 0;
14728 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) {
14729 panic("Insert in rb tree fails ret:%d rack:%p rsm:%p",
14730 insret, rack, rsm);
14733 (void)tqhash_insert(rack->r_ctl.tqh, rsm);
14735 if ((rsm->r_flags & RACK_ACKED) == 0) {
14736 TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) {
14737 if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] >
14738 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) {
14740 * If the existing ersm was sent at
14741 * a later time than the new one, then
14742 * the new one should appear ahead of this
14745 rsm->r_in_tmap = 1;
14746 TAILQ_INSERT_BEFORE(ersm, rsm, r_tnext);
14750 if (rsm->r_in_tmap == 0) {
14752 * Not found so shove it on the tail.
14754 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
14755 rsm->r_in_tmap = 1;
14758 if ((rack->r_ctl.rc_sacklast == NULL) ||
14759 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) {
14760 rack->r_ctl.rc_sacklast = rsm;
14763 rack_log_chg_info(tp, rack, 3,
14773 rack_translate_clamp_value(struct tcp_rack *rack, uint32_t optval)
14777 * F = fill cw bit -- Toggle fillcw if this bit is set.
14779 * M = set max segment bit
14781 * C = If set to non-zero override the max number of clamps.
14782 * L = Bit to indicate if clamped gets lower.
14784 * CCCC CCCC UUUU UULF PPPP PPPP PPPP PPPP
14786 * The lowest 16 bits hold the percentage in tenths, .1 - 6553.5%,
14787 * where 10.1% = 101, max 6553.5.
14788 * The upper 16 bits hold some options.
14789 * The F bit will turn on fill-cw on if you are
14790 * not pacing, it will turn it off if dgp is on.
14791 * The L bit will change it so when clamped we get
14792 * the min(gp, lt-bw) for dgp.
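 *
 * For example, an optval of 0x030201f4 decodes per the layout above as:
 * threshold 0x01f4 = 500 (i.e. 50.0%), option byte 0x02 (the L bit set,
 * so clamped connections use min(gp, lt-bw)), and a maximum of 3 clamps.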
14796 rack->r_ctl.saved_rxt_clamp_val = optval;
14797 per = optval & 0x0000ffff;
14798 rack->r_ctl.rxt_threshold = (uint64_t)(per & 0xffff);
14800 uint16_t clamp_opt;
14802 rack->excess_rxt_on = 1;
14803 clamp_opt = ((optval & 0xffff0000) >> 16);
14804 rack->r_ctl.clamp_options = clamp_opt & 0x00ff;
14805 if (clamp_opt & 0xff00) {
14806 /* A max clamps value is also present */
14807 rack->r_ctl.max_clamps = (clamp_opt >> 8);
14809 /* No specified clamps means no limit */
14810 rack->r_ctl.max_clamps = 0;
14812 if (rack->r_ctl.clamp_options & 0x0002) {
14813 rack->r_clamped_gets_lower = 1;
14815 rack->r_clamped_gets_lower = 0;
14818 /* Turn it off back to default */
14819 rack->excess_rxt_on = 0;
14820 rack->r_clamped_gets_lower = 0;
14827 rack_init(struct tcpcb *tp, void **ptr)
14829 struct inpcb *inp = tptoinpcb(tp);
14830 struct tcp_rack *rack = NULL;
14831 uint32_t iwin, snt, us_cts;
14835 * First, are we the initial stack or are we a switched stack?
14836 * If we are initing via tcp_newtcppcb the ptr passed
14837 * will be tp->t_fb_ptr. If it's a stack switch that
14838 * has a previous stack we can query, it will be a local
14839 * var that will in the end be set into t_fb_ptr.
14841 if (ptr == &tp->t_fb_ptr)
14845 *ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT);
14846 if (*ptr == NULL) {
14848 * We need to allocate memory but can't. The INP and INP_INFO
14849 * locks are held and they are recursive (this happens during setup), so a
14850 * scheme to drop the locks fails :(
14855 memset(*ptr, 0, sizeof(struct tcp_rack));
14856 rack = (struct tcp_rack *)*ptr;
14857 rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT);
14858 if (rack->r_ctl.tqh == NULL) {
14859 uma_zfree(rack_pcb_zone, rack);
14862 tqhash_init(rack->r_ctl.tqh);
14863 TAILQ_INIT(&rack->r_ctl.rc_free);
14864 TAILQ_INIT(&rack->r_ctl.rc_tmap);
14866 rack->rc_inp = inp;
14868 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0;
14869 /* Probably not needed but let's be sure */
14870 rack_clear_rate_sample(rack);
14872 * Save off the default values, socket options will poke
14873 * at these if pacing is not on or we have not yet
14874 * reached where pacing is on (gp_ready/fixed enabled).
14875 * When they get set into the CC module (when gp_ready
14876 * is enabled or we enable fixed) then we will set these
14877 * values into the CC and place in here the old values
14878 * so we have a restoral. Then we will set the flag
14879 * rc_pacing_cc_set. That way whenever we turn off pacing
14880 * or switch off this stack, we will know to go restore
14881 * the saved values.
14883 * We specifically put into the beta the ecn value for pacing.
14885 rack->rc_new_rnd_needed = 1;
14886 rack->r_ctl.rc_split_limit = V_tcp_map_split_limit;
14887 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn;
14888 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn;
14889 /* We want abe like behavior as well */
14890 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED;
14891 rack->r_ctl.rc_reorder_fade = rack_reorder_fade;
14892 rack->rc_allow_data_af_clo = rack_ignore_data_after_close;
14893 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh;
14894 if (rack_rxt_clamp_thresh) {
14895 rack_translate_clamp_value(rack, rack_rxt_clamp_thresh);
14896 rack->excess_rxt_on = 1;
14898 if (rack_uses_full_dgp_in_rec)
14899 rack->r_ctl.full_dgp_in_rec = 1;
14900 if (rack_fill_cw_state)
14901 rack->rc_pace_to_cwnd = 1;
14902 if (rack_pacing_min_seg)
14903 rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg;
14905 rack->use_rack_rr = 1;
14906 if (rack_dnd_default) {
14907 rack->rc_pace_dnd = 1;
14909 if (V_tcp_delack_enabled)
14910 tp->t_delayed_ack = 1;
14912 tp->t_delayed_ack = 0;
14913 #ifdef TCP_ACCOUNTING
14914 if (rack_tcp_accounting) {
14915 tp->t_flags2 |= TF2_TCP_ACCOUNTING;
14918 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss;
14919 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca;
14920 if (rack_enable_shared_cwnd)
14921 rack->rack_enable_scwnd = 1;
14922 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor;
14923 rack->rc_user_set_max_segs = rack_hptsi_segments;
14924 rack->rc_force_max_seg = 0;
14925 TAILQ_INIT(&rack->r_ctl.opt_list);
14926 if (rack_hibeta_setting)
14927 rack->rack_hibeta = 1;
14928 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh;
14929 rack->r_ctl.rc_pkt_delay = rack_pkt_delay;
14930 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp;
14931 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff;
14932 rack->r_ctl.rc_highest_us_rtt = 0;
14933 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap;
14934 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop);
14935 if (rack_use_cmp_acks)
14936 rack->r_use_cmp_ack = 1;
14937 if (rack_disable_prr)
14938 rack->rack_no_prr = 1;
14939 if (rack_gp_no_rec_chg)
14940 rack->rc_gp_no_rec_chg = 1;
14941 if (rack_pace_every_seg && tcp_can_enable_pacing()) {
14942 rack->rc_always_pace = 1;
14943 if ((rack->gp_ready) && (rack->rc_always_pace && (rack->use_fixed_rate == 0)))
14944 rack_set_cc_pacing(rack);
14946 rack->rc_always_pace = 0;
14947 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack)
14948 rack->r_mbuf_queue = 1;
14950 rack->r_mbuf_queue = 0;
14951 rack_set_pace_segments(tp, rack, __LINE__, NULL);
14952 if (rack_limits_scwnd)
14953 rack->r_limit_scw = 1;
14955 rack->r_limit_scw = 0;
14956 rack_init_retransmit_value(rack, rack_rxt_controls);
14957 rack->rc_labc = V_tcp_abc_l_var;
14958 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method;
14959 rack->rack_tlp_threshold_use = rack_tlp_threshold_use;
14960 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr;
14961 rack->r_ctl.rc_min_to = rack_min_to;
14962 microuptime(&rack->r_ctl.act_rcv_time);
14963 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
14964 rack->rc_init_win = rack_default_init_window;
14965 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss;
14966 if (rack_hw_up_only)
14967 rack->r_up_only = 1;
14968 if (rack_do_dyn_mul) {
14969 /* When dynamic adjustment is on CA needs to start at 100% */
14970 rack->rc_gp_dyn_mul = 1;
14971 if (rack_do_dyn_mul >= 100)
14972 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
14974 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
14975 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec;
14976 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
14977 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
14978 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN,
14979 rack_probertt_filter_life);
14980 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
14981 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
14982 rack->r_ctl.rc_time_of_last_probertt = us_cts;
14983 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks();
14984 rack->r_ctl.rc_time_probertt_starts = 0;
14985 if (rack_dsack_std_based & 0x1) {
14986 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */
14987 rack->rc_rack_tmr_std_based = 1;
14989 if (rack_dsack_std_based & 0x2) {
14990 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */
14991 rack->rc_rack_use_dsack = 1;
14993 /* We require at least one measurement, even if the sysctl is 0 */
14994 if (rack_req_measurements)
14995 rack->r_ctl.req_measurements = rack_req_measurements;
14997 rack->r_ctl.req_measurements = 1;
14998 if (rack_enable_hw_pacing)
14999 rack->rack_hdw_pace_ena = 1;
15000 if (rack_hw_rate_caps)
15001 rack->r_rack_hw_rate_caps = 1;
15002 #ifdef TCP_SAD_DETECTION
15003 rack->do_detection = 1;
15005 rack->do_detection = 0;
15007 if (rack_non_rxt_use_cr)
15008 rack->rack_rec_nonrxt_use_cr = 1;
15009 /* Let's set up the fsb block */
15010 err = rack_init_fsb(tp, rack);
15012 uma_zfree(rack_pcb_zone, *ptr);
15016 if (rack_do_hystart) {
15017 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED;
15018 if (rack_do_hystart > 1)
15019 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND;
15020 if (rack_do_hystart > 2)
15021 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH;
15023 /* Log what we will do with queries */
15024 rack_log_chg_info(tp, rack, 7,
15026 if (rack_def_profile)
15027 rack_set_profile(rack, rack_def_profile);
15028 /* Cancel the GP measurement in progress */
15029 tp->t_flags &= ~TF_GPUTINPROG;
15030 if ((tp->t_state != TCPS_CLOSED) &&
15031 (tp->t_state != TCPS_TIME_WAIT)) {
15033 * We are already open, we may
15034 * need to adjust a few things.
15036 if (SEQ_GT(tp->snd_max, tp->iss))
15037 snt = tp->snd_max - tp->iss;
15040 iwin = rc_init_window(rack);
15041 if ((snt < iwin) &&
15043 /* We are not past the initial window
15044 * on the first init (i.e. a stack switch
15045 * has not yet occurred) so we need to make
15046 * sure cwnd and ssthresh are correct.
15048 if (tp->snd_cwnd < iwin)
15049 tp->snd_cwnd = iwin;
15051 * If we are within the initial window
15052 * we want ssthresh to be unlimited. Setting
15053 * it to the rwnd (which the default stack does
15054 * and older racks) is not really a good idea
15055 * since we want to be in SS and grow both the
15056 * cwnd and the rwnd (via dynamic rwnd growth). If
15057 * we set it to the rwnd then as the peer grows its
15058 * rwnd we will be stuck in CA and never hit SS.
15060 * It's far better to raise it up high (this takes the
15061 * risk that there has been a loss already; probably
15062 * we should have an indicator of loss in all stacks,
15063 * but we don't), but considering the normal use this
15064 * is a risk worth taking. The consequences of not
15065 * hitting SS are far worse than going one more time
15066 * into it early on (before we have sent even an IW).
15067 * It is highly unlikely that we will have had a loss
15068 * before getting the IW out.
15070 tp->snd_ssthresh = 0xffffffff;
15073 * Any init based on sequence numbers
15074 * should be done in the deferred init path
15075 * since we can be CLOSED and not have them
15076 * inited when rack_init() is called. We
15077 * are not closed so let's call it.
15079 rack_deferred_init(tp, rack);
15081 if ((tp->t_state != TCPS_CLOSED) &&
15082 (tp->t_state != TCPS_TIME_WAIT) &&
15084 (tp->snd_una != tp->snd_max)) {
15085 err = rack_init_outstanding(tp, rack, us_cts, *ptr);
15091 rack_stop_all_timers(tp, rack);
15092 /* Setup all the inp_flags2 */
15093 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
15094 tptoinpcb(tp)->inp_flags2 |= INP_SUPPORTS_MBUFQ;
15096 tptoinpcb(tp)->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
15097 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
15098 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
15100 * Timers in Rack are kept in microseconds so let's
15101 * convert any initial incoming variables
15102 * from ticks into usecs. Note that we
15103 * also change the values of t_srtt and t_rttvar, if
15104 * they are non-zero. They are kept with a 5
15105 * bit decimal so we have to carefully convert
15106 * these to get the full precision.
15108 rack_convert_rtts(tp);
15109 rack_log_hystart_event(rack, rack->r_ctl.roundends, 20);
15110 if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) {
15111 /* We do not start any timers on DROPPED connections */
15112 if (tp->t_fb->tfb_chg_query == NULL) {
15113 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
15115 struct tcp_query_resp qr;
15118 memset(&qr, 0, sizeof(qr));
15120 /* Get the misc time stamps and such for rack */
15121 qr.req = TCP_QUERY_RACK_TIMES;
15122 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr);
15124 rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts;
15125 rack->r_ctl.num_dsack = qr.rack_num_dsacks;
15126 rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time;
15127 rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt;
15128 rack->rc_rack_rtt = qr.rack_rtt;
15129 rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time;
15130 rack->r_ctl.rc_sacked = qr.rack_sacked;
15131 rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt;
15132 rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered;
15133 rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs;
15134 rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt;
15135 rack->r_ctl.rc_prr_out = qr.rack_prr_out;
15136 if (qr.rack_tlp_out) {
15137 rack->rc_tlp_in_progress = 1;
15138 rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out;
15140 rack->rc_tlp_in_progress = 0;
15141 rack->r_ctl.rc_tlp_cnt_out = 0;
15143 if (qr.rack_srtt_measured)
15144 rack->rc_srtt_measure_made = 1;
15145 if (qr.rack_in_persist == 1) {
15146 rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle;
15147 #ifdef NETFLIX_SHARED_CWND
15148 if (rack->r_ctl.rc_scw) {
15149 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
15150 rack->rack_scwnd_is_idle = 1;
15153 rack->r_ctl.persist_lost_ends = 0;
15154 rack->probe_not_answered = 0;
15155 rack->forced_ack = 0;
15156 tp->t_rxtshift = 0;
15157 rack->rc_in_persist = 1;
15158 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
15159 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
15161 if (qr.rack_wanted_output)
15162 rack->r_wanted_output = 1;
15163 rack_log_chg_info(tp, rack, 6,
15166 qr.rack_reorder_ts);
15168 /* Get the old stack timers */
15170 qr.req = TCP_QUERY_TIMERS_UP;
15171 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr);
15174 * A non-zero return means we have a timer (or timers)
15175 * to start. Zero means no timer (no keepalive
15180 rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags;
15181 if (qr.timer_hpts_flags & PACE_PKT_OUTPUT) {
15182 rack->r_ctl.rc_last_output_to = qr.timer_pacing_to;
15183 if (TSTMP_GT(qr.timer_pacing_to, us_cts))
15184 tov = qr.timer_pacing_to - us_cts;
15186 tov = HPTS_TICKS_PER_SLOT;
15188 if (qr.timer_hpts_flags & PACE_TMR_MASK) {
15189 rack->r_ctl.rc_timer_exp = qr.timer_timer_exp;
15191 if (TSTMP_GT(qr.timer_timer_exp, us_cts))
15192 tov = qr.timer_timer_exp - us_cts;
15194 tov = HPTS_TICKS_PER_SLOT;
15197 rack_log_chg_info(tp, rack, 4,
15198 rack->r_ctl.rc_hpts_flags,
15199 rack->r_ctl.rc_last_output_to,
15200 rack->r_ctl.rc_timer_exp);
15202 struct hpts_diag diag;
15204 (void)tcp_hpts_insert_diag(rack->rc_inp, HPTS_USEC_TO_SLOTS(tov),
15206 rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time);
15210 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur,
15211 __LINE__, RACK_RTTS_INIT);
15217 rack_handoff_ok(struct tcpcb *tp)
15219 if ((tp->t_state == TCPS_CLOSED) ||
15220 (tp->t_state == TCPS_LISTEN)) {
15221 /* Sure no problem though it may not stick */
15224 if ((tp->t_state == TCPS_SYN_SENT) ||
15225 (tp->t_state == TCPS_SYN_RECEIVED)) {
15227 * We really don't know if you support sack,
15228 * you have to get to ESTAB or beyond to tell.
15232 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) {
15234 * Rack will only send a FIN after all data is acknowledged.
15235 * So in this case we have more data outstanding. We can't
15236 * switch stacks until either all data and only the FIN
15237 * is left (in which case rack_init() now knows how
15238 * to deal with that) <or> all is acknowledged and we
15239 * are only left with incoming data, though why you
15240 * would want to switch to rack after all data is acknowledged
15241 * I have no idea (rrs)!
15245 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){
15249 * If we reach here we don't do SACK on this connection so we can
15256 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
15259 if (tp->t_fb_ptr) {
15260 uint32_t cnt_free = 0;
15261 struct tcp_rack *rack;
15262 struct rack_sendmap *rsm;
15264 tcp_handle_orphaned_packets(tp);
15265 tp->t_flags &= ~TF_FORCEDATA;
15266 rack = (struct tcp_rack *)tp->t_fb_ptr;
15267 rack_log_pacing_delay_calc(rack,
15271 rack_get_gp_est(rack), /* delRate */
15272 rack_get_lt_bw(rack), /* rttProp */
15273 20, __LINE__, NULL, 0);
15274 #ifdef NETFLIX_SHARED_CWND
15275 if (rack->r_ctl.rc_scw) {
15278 if (rack->r_limit_scw)
15279 limit = max(1, rack->r_ctl.rc_lowest_us_rtt);
15282 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw,
15283 rack->r_ctl.rc_scw_index,
15285 rack->r_ctl.rc_scw = NULL;
15288 if (rack->r_ctl.fsb.tcp_ip_hdr) {
15289 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB);
15290 rack->r_ctl.fsb.tcp_ip_hdr = NULL;
15291 rack->r_ctl.fsb.th = NULL;
15293 if (rack->rc_always_pace) {
15294 tcp_decrement_paced_conn();
15295 rack_undo_cc_pacing(rack);
15296 rack->rc_always_pace = 0;
15298 /* Clean up any options if they were not applied */
15299 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) {
15300 struct deferred_opt_list *dol;
15302 dol = TAILQ_FIRST(&rack->r_ctl.opt_list);
15303 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
15304 free(dol, M_TCPDO);
15306 /* rack does not use force data but other stacks may clear it */
15307 if (rack->r_ctl.crte != NULL) {
15308 tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
15309 rack->rack_hdrw_pacing = 0;
15310 rack->r_ctl.crte = NULL;
15312 #ifdef TCP_BLACKBOX
15313 tcp_log_flowend(tp);
15316 * Let's take a different approach to purging: just
15317 * get each one and free it like a cum-ack would, and
15318 * don't use a foreach loop.
15320 rsm = tqhash_min(rack->r_ctl.tqh);
15322 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK);
15323 rack->r_ctl.rc_num_maps_alloced--;
15324 uma_zfree(rack_zone, rsm);
15325 rsm = tqhash_min(rack->r_ctl.tqh);
15327 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
15329 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
15330 rack->r_ctl.rc_num_maps_alloced--;
15331 rack->rc_free_cnt--;
15333 uma_zfree(rack_zone, rsm);
15334 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
15336 if ((rack->r_ctl.rc_num_maps_alloced > 0) &&
15337 (tcp_bblogging_on(tp))) {
15338 union tcp_log_stackspecific log;
15341 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
15342 log.u_bbr.flex8 = 10;
15343 log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced;
15344 log.u_bbr.flex2 = rack->rc_free_cnt;
15345 log.u_bbr.flex3 = cnt_free;
15346 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
15347 rsm = tqhash_min(rack->r_ctl.tqh);
15348 log.u_bbr.delRate = (uint64_t)rsm;
15349 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
15350 log.u_bbr.cur_del_rate = (uint64_t)rsm;
15351 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
15352 log.u_bbr.pkt_epoch = __LINE__;
15353 (void)tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
15354 0, &log, false, NULL, NULL, 0, &tv);
15356 KASSERT((rack->r_ctl.rc_num_maps_alloced == 0),
15357 ("rack:%p num_aloc:%u after freeing all?",
15359 rack->r_ctl.rc_num_maps_alloced));
15360 rack->rc_free_cnt = 0;
15361 free(rack->r_ctl.tqh, M_TCPFSB);
15362 rack->r_ctl.tqh = NULL;
15363 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
15364 tp->t_fb_ptr = NULL;
15366 /* Make sure snd_nxt is correctly set */
15367 tp->snd_nxt = tp->snd_max;
15371 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack)
15373 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) {
15374 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0;
15376 switch (tp->t_state) {
15377 case TCPS_SYN_SENT:
15378 rack->r_state = TCPS_SYN_SENT;
15379 rack->r_substate = rack_do_syn_sent;
15381 case TCPS_SYN_RECEIVED:
15382 rack->r_state = TCPS_SYN_RECEIVED;
15383 rack->r_substate = rack_do_syn_recv;
15385 case TCPS_ESTABLISHED:
15386 rack_set_pace_segments(tp, rack, __LINE__, NULL);
15387 rack->r_state = TCPS_ESTABLISHED;
15388 rack->r_substate = rack_do_established;
15390 case TCPS_CLOSE_WAIT:
15391 rack->r_state = TCPS_CLOSE_WAIT;
15392 rack->r_substate = rack_do_close_wait;
15394 case TCPS_FIN_WAIT_1:
15395 rack_set_pace_segments(tp, rack, __LINE__, NULL);
15396 rack->r_state = TCPS_FIN_WAIT_1;
15397 rack->r_substate = rack_do_fin_wait_1;
15400 rack_set_pace_segments(tp, rack, __LINE__, NULL);
15401 rack->r_state = TCPS_CLOSING;
15402 rack->r_substate = rack_do_closing;
15404 case TCPS_LAST_ACK:
15405 rack_set_pace_segments(tp, rack, __LINE__, NULL);
15406 rack->r_state = TCPS_LAST_ACK;
15407 rack->r_substate = rack_do_lastack;
15409 case TCPS_FIN_WAIT_2:
15410 rack->r_state = TCPS_FIN_WAIT_2;
15411 rack->r_substate = rack_do_fin_wait_2;
15415 case TCPS_TIME_WAIT:
15419 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
15420 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
15425 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
15428 * We received an ack, and then did not
15429 * call send, or were bounced out because the
15430 * hpts was running. Now a timer is up as well, is
15431 * it the right timer?
15433 struct rack_sendmap *rsm;
15436 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
15437 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT))
15439 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
15440 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) &&
15441 (tmr_up == PACE_TMR_RXT)) {
15442 /* Should be an RXT */
15446 /* Nothing outstanding? */
15447 if (tp->t_flags & TF_DELACK) {
15448 if (tmr_up == PACE_TMR_DELACK)
15449 /* We are supposed to have delayed ack up and we do */
15451 } else if (sbavail(&tptosocket(tp)->so_snd) && (tmr_up == PACE_TMR_RXT)) {
15453 * if we hit enobufs then we would expect the possibility
15454 * of nothing outstanding and the RXT up (and the hptsi timer).
15457 } else if (((V_tcp_always_keepalive ||
15458 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
15459 (tp->t_state <= TCPS_CLOSING)) &&
15460 (tmr_up == PACE_TMR_KEEP) &&
15461 (tp->snd_max == tp->snd_una)) {
15462 /* We should have keep alive up and we do */
15466 if (SEQ_GT(tp->snd_max, tp->snd_una) &&
15467 ((tmr_up == PACE_TMR_TLP) ||
15468 (tmr_up == PACE_TMR_RACK) ||
15469 (tmr_up == PACE_TMR_RXT))) {
15471 * Either a Rack, TLP or RXT is fine if we
15472 * have outstanding data.
15475 } else if (tmr_up == PACE_TMR_DELACK) {
15477 * If the delayed ack was going to go off
15478 * before the rtx/tlp/rack timer were going to
15479 * expire, then that would be the timer in control.
15480 * Note we don't check the time here trusting the
15486 * Ok the timer originally started is not what we want now.
15487 * We will force the hpts to be stopped if any, and restart
15488 * with the slot set to what was in the saved slot.
15490 if (tcp_in_hpts(rack->rc_inp)) {
15491 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
15494 us_cts = tcp_get_usecs(NULL);
15495 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
15497 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
15499 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
15501 tcp_hpts_remove(rack->rc_inp);
15503 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
15504 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
15509 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts)
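/*
 * Standard window-update acceptance test: take the advertised window
 * if this segment is newer than the last one that updated the window
 * (wl1 < seq), or it is the same segment but acks new data (wl2 < ack),
 * or it carries the same seg/ack but advertises a larger window.
 */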
15511 if ((SEQ_LT(tp->snd_wl1, seq) ||
15512 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) ||
15513 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) {
15514 /* keep track of pure window updates */
15515 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd))
15516 KMOD_TCPSTAT_INC(tcps_rcvwinupd);
15517 tp->snd_wnd = tiwin;
15518 rack_validate_fo_sendwin_up(tp, rack);
15521 if (tp->snd_wnd > tp->max_sndwnd)
15522 tp->max_sndwnd = tp->snd_wnd;
15523 rack->r_wanted_output = 1;
15524 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) {
15525 tp->snd_wnd = tiwin;
15526 rack_validate_fo_sendwin_up(tp, rack);
15530 /* Not a valid win update */
15533 if (tp->snd_wnd > tp->max_sndwnd)
15534 tp->max_sndwnd = tp->snd_wnd;
15535 /* Do we exit persists? */
15536 if ((rack->rc_in_persist != 0) &&
15537 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
15538 rack->r_ctl.rc_pace_min_segs))) {
15539 rack_exit_persist(tp, rack, cts);
15541 /* Do we enter persists? */
15542 if ((rack->rc_in_persist == 0) &&
15543 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
15544 TCPS_HAVEESTABLISHED(tp->t_state) &&
15545 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
15546 sbavail(&tptosocket(tp)->so_snd) &&
15547 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
15549 * Here the rwnd is less than
15550 * the pacing size, we are established,
15551 * nothing is outstanding, and there is
15552 * data to send. Enter persists.
15554 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack);
15559 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq)
15562 if (tcp_bblogging_on(rack->rc_tp)) {
15563 struct inpcb *inp = tptoinpcb(tp);
15564 union tcp_log_stackspecific log;
15565 struct timeval ltv;
15566 char tcp_hdr_buf[60];
15568 struct timespec ts;
15569 uint32_t orig_snd_una;
15572 #ifdef TCP_REQUEST_TRK
15573 struct http_sendfile_track *http_req;
15575 if (SEQ_GT(ae->ack, tp->snd_una)) {
15576 http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1));
15578 http_req = tcp_http_find_req_for_seq(tp, ae->ack);
15581 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
15582 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
15583 if (rack->rack_no_prr == 0)
15584 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
15586 log.u_bbr.flex1 = 0;
15587 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
15588 log.u_bbr.use_lt_bw <<= 1;
15589 log.u_bbr.use_lt_bw |= rack->r_might_revert;
15590 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
15591 log.u_bbr.bbr_state = rack->rc_free_cnt;
15592 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
15593 log.u_bbr.pkts_out = tp->t_maxseg;
15594 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
15595 log.u_bbr.flex7 = 1;
15596 log.u_bbr.lost = ae->flags;
15597 log.u_bbr.cwnd_gain = ackval;
15598 log.u_bbr.pacing_gain = 0x2;
15599 if (ae->flags & TSTMP_HDWR) {
15600 /* Record the hardware timestamp if present */
15601 log.u_bbr.flex3 = M_TSTMP;
15602 ts.tv_sec = ae->timestamp / 1000000000;
15603 ts.tv_nsec = ae->timestamp % 1000000000;
15604 ltv.tv_sec = ts.tv_sec;
15605 ltv.tv_usec = ts.tv_nsec / 1000;
15606 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v);
15607 } else if (ae->flags & TSTMP_LRO) {
15608 /* Record the LRO arrival timestamp */
15609 log.u_bbr.flex3 = M_TSTMP_LRO;
15610 ts.tv_sec = ae->timestamp / 1000000000;
15611 ts.tv_nsec = ae->timestamp % 1000000000;
15612 ltv.tv_sec = ts.tv_sec;
15613 ltv.tv_usec = ts.tv_nsec / 1000;
15614 log.u_bbr.flex5 = tcp_tv_to_usectick(<v);
15616 log.u_bbr.timeStamp = tcp_get_usecs(<v);
15617 /* Log the rcv time */
15618 log.u_bbr.delRate = ae->timestamp;
15619 #ifdef TCP_REQUEST_TRK
15620 log.u_bbr.applimited = tp->t_http_closed;
15621 log.u_bbr.applimited <<= 8;
15622 log.u_bbr.applimited |= tp->t_http_open;
15623 log.u_bbr.applimited <<= 8;
15624 log.u_bbr.applimited |= tp->t_http_req;
15626 /* Copy out any client req info */
15628 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
15630 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
15631 log.u_bbr.rttProp = http_req->timestamp;
15632 log.u_bbr.cur_del_rate = http_req->start;
15633 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
15634 log.u_bbr.flex8 |= 1;
15636 log.u_bbr.flex8 |= 2;
15637 log.u_bbr.bw_inuse = http_req->end;
15639 log.u_bbr.flex6 = http_req->start_seq;
15640 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
15641 log.u_bbr.flex8 |= 4;
15642 log.u_bbr.epoch = http_req->end_seq;
15646 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf));
15647 th = (struct tcphdr *)tcp_hdr_buf;
15648 th->th_seq = ae->seq;
15649 th->th_ack = ae->ack;
15650 th->th_win = ae->win;
15651 /* Now fill in the ports */
15652 th->th_sport = inp->inp_fport;
15653 th->th_dport = inp->inp_lport;
15654 tcp_set_flags(th, ae->flags);
15655 /* Now do we have a timestamp option? */
15656 if (ae->flags & HAS_TSTMP) {
15660 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2);
15661 cp = (u_char *)(th + 1);
15666 *cp = TCPOPT_TIMESTAMP;
15668 *cp = TCPOLEN_TIMESTAMP;
15670 val = htonl(ae->ts_value);
15671 bcopy((char *)&val,
15672 (char *)cp, sizeof(uint32_t));
15673 val = htonl(ae->ts_echo);
15674 bcopy((char *)&val,
15675 (char *)(cp + 4), sizeof(uint32_t));
15677 th->th_off = (sizeof(struct tcphdr) >> 2);
15680 * For sane logging we need to play a little trick.
15681 * If the ack were fully processed we would have moved
15682 * snd_una to high_seq, but since compressed acks are
15683 * processed in two phases, at this point (logging) snd_una
15684 * won't be advanced. So we would see multiple acks showing
15685 * the advancement. We can prevent that by "pretending" that
15686 * snd_una was advanced and then un-advancing it so that the
15687 * logging code has the right value for tlb_snd_una.
15689 if (tp->snd_una != high_seq) {
15690 orig_snd_una = tp->snd_una;
15691 tp->snd_una = high_seq;
15695 TCP_LOG_EVENTP(tp, th,
15696 &tptosocket(tp)->so_rcv,
15697 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0,
15698 0, &log, true, <v);
15700 tp->snd_una = orig_snd_una;
15707 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts)
15711 * A persist or keep-alive was forced out, update our
15712 * min rtt time. Note we now worry about lost responses.
15713 * When a subsequent keep-alive or persist times out
15714 * and forced_ack is still on, then the last probe
15715 * was not responded to. In such cases we have a
15716 * sysctl that controls the behavior. Either we apply
15717 * the rtt but with reduced confidence (0). Or we just
15718 * plain don't apply the rtt estimate. Having data flow
15719 * will clear the probe_not_answered flag i.e. cum-ack
15720 * move forward <or> exiting and reentering persists.
15723 rack->forced_ack = 0;
15724 rack->rc_tp->t_rxtshift = 0;
15725 if ((rack->rc_in_persist &&
15726 (tiwin == rack->rc_tp->snd_wnd)) ||
15727 (rack->rc_in_persist == 0)) {
15729 * In persists only apply the RTT update if this is
15730 * a response to our window probe. And that
15731 * means the rwnd sent must match the current
15732 * snd_wnd. If it does not, then we got a
15733 * window update ack instead. For keepalive
15734 * we allow the answer no matter what the window.
15736 * Note that if the probe_not_answered is set then
15737 * the forced_ack_ts is the oldest one i.e. the first
15738 * probe sent that might have been lost. This assures
15739 * us that if we do calculate an RTT it errs on the long
15740 * side rather than being some short value.
15742 if (rack->rc_in_persist)
15743 counter_u64_add(rack_persists_acks, 1);
15744 us_rtt = us_cts - rack->r_ctl.forced_ack_ts;
15747 if (rack->probe_not_answered == 0) {
15748 rack_apply_updated_usrtt(rack, us_rtt, us_cts);
15749 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1);
15751 /* We have a retransmitted probe here too */
15752 if (rack_apply_rtt_with_reduced_conf) {
15753 rack_apply_updated_usrtt(rack, us_rtt, us_cts);
15754 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1);
15761 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv)
15764 * Handle a "special" compressed ack mbuf. Each incoming
15765 * ack has only four possible dispositions:
15767 * A) It moves the cum-ack forward
15768 * B) It is behind the cum-ack.
15769 * C) It is a window-update ack.
15770 * D) It is a dup-ack.
15772 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES
15773 * in the incoming mbuf. We also need to still pay attention
15774 * to nxt_pkt since there may be another packet after this
15777 #ifdef TCP_ACCOUNTING
15782 struct timespec ts;
15783 struct tcp_rack *rack;
15784 struct tcp_ackent *ae;
15785 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack;
15786 int cnt, i, did_out, ourfinisacked = 0;
15787 struct tcpopt to_holder, *to = NULL;
15788 #ifdef TCP_ACCOUNTING
15789 int win_up_req = 0;
15792 int under_pacing = 0;
15794 #ifdef TCP_ACCOUNTING
15797 rack = (struct tcp_rack *)tp->t_fb_ptr;
15798 if (rack->gp_ready &&
15799 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT))
15802 if (rack->r_state != tp->t_state)
15803 rack_set_state(tp, rack);
15804 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
15805 (tp->t_flags & TF_GPUTINPROG)) {
15807 * We have a goodput in progress
15808 * and we have entered a late state.
15809 * Do we have enough data in the sb
15810 * to handle the GPUT request?
15814 bytes = tp->gput_ack - tp->gput_seq;
15815 if (SEQ_GT(tp->gput_seq, tp->snd_una))
15816 bytes += tp->gput_seq - tp->snd_una;
15817 if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
15819 * There are not enough bytes in the socket
15820 * buffer that have been sent to cover this
15821 * measurement. Cancel it.
15823 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
15824 rack->r_ctl.rc_gp_srtt /*flex1*/,
15826 0, 0, 18, __LINE__, NULL, 0);
15827 tp->t_flags &= ~TF_GPUTINPROG;
15832 KASSERT((m->m_len >= sizeof(struct tcp_ackent)),
15833 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len));
15834 cnt = m->m_len / sizeof(struct tcp_ackent);
15835 counter_u64_add(rack_multi_single_eq, cnt);
15836 high_seq = tp->snd_una;
15837 the_win = tp->snd_wnd;
15838 win_seq = tp->snd_wl1;
15839 win_upd_ack = tp->snd_wl2;
15840 cts = tcp_tv_to_usectick(tv);
15841 ms_cts = tcp_tv_to_mssectick(tv);
15842 rack->r_ctl.rc_rcvtime = cts;
15843 segsiz = ctf_fixed_maxseg(tp);
15844 if ((rack->rc_gp_dyn_mul) &&
15845 (rack->use_fixed_rate == 0) &&
15846 (rack->rc_always_pace)) {
15847 /* Check in on probertt */
15848 rack_check_probe_rtt(rack, cts);
15850 for (i = 0; i < cnt; i++) {
15851 #ifdef TCP_ACCOUNTING
15852 ts_val = get_cyclecount();
15854 rack_clear_rate_sample(rack);
15855 ae = ((mtod(m, struct tcp_ackent *)) + i);
15856 if (ae->flags & TH_FIN)
15857 rack_log_pacing_delay_calc(rack,
15861 rack_get_gp_est(rack), /* delRate */
15862 rack_get_lt_bw(rack), /* rttProp */
15863 20, __LINE__, NULL, 0);
15864 /* Setup the window */
15865 tiwin = ae->win << tp->snd_scale;
15866 if (tiwin > rack->r_ctl.rc_high_rwnd)
15867 rack->r_ctl.rc_high_rwnd = tiwin;
15868 /* figure out the type of ack */
15869 if (SEQ_LT(ae->ack, high_seq)) {
15871 ae->ack_val_set = ACK_BEHIND;
15872 } else if (SEQ_GT(ae->ack, high_seq)) {
15874 ae->ack_val_set = ACK_CUMACK;
15875 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){
15877 ae->ack_val_set = ACK_DUPACK;
15880 ae->ack_val_set = ACK_RWND;
15882 if (rack->sack_attack_disable > 0) {
15883 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__);
15884 rack->r_ctl.ack_during_sd++;
15886 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq);
15887 /* Validate timestamp */
15888 if (ae->flags & HAS_TSTMP) {
15889 /* Setup for a timestamp */
15890 to->to_flags = TOF_TS;
15891 ae->ts_echo -= tp->ts_offset;
15892 to->to_tsecr = ae->ts_echo;
15893 to->to_tsval = ae->ts_value;
15895 * If echoed timestamp is later than the current time, fall back to
15896 * non RFC1323 RTT calculation. Normalize timestamp if syncookies
15897 * were used when this connection was established.
15899 if (TSTMP_GT(ae->ts_echo, ms_cts))
15901 if (tp->ts_recent &&
15902 TSTMP_LT(ae->ts_value, tp->ts_recent)) {
15903 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) {
15904 #ifdef TCP_ACCOUNTING
15905 rdstc = get_cyclecount();
15906 if (rdstc > ts_val) {
15907 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15908 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
15915 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) &&
15916 SEQ_LEQ(tp->last_ack_sent, ae->seq)) {
15917 tp->ts_recent_age = tcp_ts_getticks();
15918 tp->ts_recent = ae->ts_value;
15921 /* Setup for no options */
15924 /* Update the rcv time and perform idle reduction possibly */
15925 if (tp->t_idle_reduce &&
15926 (tp->snd_max == tp->snd_una) &&
15927 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
15928 counter_u64_add(rack_input_idle_reduces, 1);
15929 rack_cc_after_idle(rack, tp);
15931 tp->t_rcvtime = ticks;
15932 /* Now what about ECN of a chain of pure ACKs? */
15933 if (tcp_ecn_input_segment(tp, ae->flags, 0,
15934 tcp_packets_this_ack(tp, ae->ack),
15936 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__);
15937 #ifdef TCP_ACCOUNTING
15938 /* Count this specific type of ack in the accounting stats */
15939 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15940 tp->tcp_cnt_counters[ae->ack_val_set]++;
15944 * Note how we could move these up in the determination
15945 * above, but we don't, so that the timestamp checks (and ECN)
15946 * are done first before we do any processing on the ACK.
15947 * The non-compressed path through the code has this
15948 * weakness (noted by @jtl) that it actually does some
15949 * processing before verifying the timestamp information.
15950 * We don't take that path here which is why we set
15951 * the ack_val_set first, do the timestamp and ecn
15952 * processing, and then look at what we have setup.
15954 if (ae->ack_val_set == ACK_BEHIND) {
15956 * Case B: flag reordering, if the window is not closed,
15957 * or it could be a keep-alive or persists
15959 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) {
15960 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
15961 if (rack->r_ctl.rc_reorder_ts == 0)
15962 rack->r_ctl.rc_reorder_ts = 1;
15964 } else if (ae->ack_val_set == ACK_DUPACK) {
15966 rack_strike_dupack(rack);
15967 } else if (ae->ack_val_set == ACK_RWND) {
15969 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
15970 ts.tv_sec = ae->timestamp / 1000000000;
15971 ts.tv_nsec = ae->timestamp % 1000000000;
15972 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
15973 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
15975 rack->r_ctl.act_rcv_time = *tv;
15977 if (rack->forced_ack) {
15978 rack_handle_probe_response(rack, tiwin,
15979 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
15981 #ifdef TCP_ACCOUNTING
15984 win_upd_ack = ae->ack;
15987 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts);
15990 if (SEQ_GT(ae->ack, tp->snd_max)) {
15992 * We just send an ack since the incoming
15993 * ack is beyond the largest seq we sent.
15995 if ((tp->t_flags & TF_ACKNOW) == 0) {
15996 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt);
15997 if (tp->t_flags & TF_ACKNOW)
15998 rack->r_wanted_output = 1;
16002 /* If the window changed setup to update */
16003 if (tiwin != tp->snd_wnd) {
16004 win_upd_ack = ae->ack;
16007 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts);
16009 #ifdef TCP_ACCOUNTING
16010 /* Account for the acks */
16011 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16012 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz);
16015 high_seq = ae->ack;
16016 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp))
16017 rack_log_hystart_event(rack, high_seq, 8);
16018 /* Setup our act_rcv_time */
16019 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
16020 ts.tv_sec = ae->timestamp / 1000000000;
16021 ts.tv_nsec = ae->timestamp % 1000000000;
16022 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
16023 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
16025 rack->r_ctl.act_rcv_time = *tv;
16027 rack_process_to_cumack(tp, rack, ae->ack, cts, to,
16028 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time));
16029 #ifdef TCP_REQUEST_TRK
16030 rack_http_check_for_comp(rack, high_seq);
16032 if (rack->rc_dsack_round_seen) {
16033 /* Is the dsack round over? */
16034 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) {
16036 rack->rc_dsack_round_seen = 0;
16037 rack_log_dsack_event(rack, 3, __LINE__, 0, 0);
16042 /* And let's be sure to commit the rtt measurements for this ack */
16043 tcp_rack_xmit_timer_commit(rack, tp);
16044 #ifdef TCP_ACCOUNTING
16045 rdstc = get_cyclecount();
16046 if (rdstc > ts_val) {
16047 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16048 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
16049 if (ae->ack_val_set == ACK_CUMACK)
16050 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val);
16055 #ifdef TCP_ACCOUNTING
16056 ts_val = get_cyclecount();
16058 /* Tend to any collapsed window */
16059 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) {
16060 /* The peer collapsed the window */
16061 rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__);
16062 } else if (rack->rc_has_collapsed)
16063 rack_un_collapse_window(rack, __LINE__);
16064 if ((rack->r_collapse_point_valid) &&
16065 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point)))
16066 rack->r_collapse_point_valid = 0;
16067 acked_amount = acked = (high_seq - tp->snd_una);
16070 * The draft (v3) calls for us to use SEQ_GEQ, but that
16071 * causes issues when we are just going app limited. Let's
16072 * instead use SEQ_GT <or> where it's equal but more data
16075 * Also make sure we are on the last ack of a series. We
16076 * have to have all the acks processed in queue to know
16077 * if there is something left outstanding.
16080 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) &&
16081 (rack->rc_new_rnd_needed == 0) &&
16083 rack_log_hystart_event(rack, high_seq, 21);
16084 rack->r_ctl.current_round++;
16085 /* Force the next send to setup the next round */
16086 rack->rc_new_rnd_needed = 1;
16087 if (CC_ALGO(tp)->newround != NULL) {
16088 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round);
16092 * Clear the probe not answered flag
16093 * since cum-ack moved forward.
16095 rack->probe_not_answered = 0;
16096 if (rack->sack_attack_disable == 0)
16097 rack_do_decay(rack);
16098 if (acked >= segsiz) {
16100 * You only get credit for
16101 * MSS and greater (and you get extra
16102 * credit for larger cum-ack moves).
16106 ac = acked / segsiz;
16107 rack->r_ctl.ack_count += ac;
16108 counter_u64_add(rack_ack_total, ac);
16110 if (rack->r_ctl.ack_count > 0xfff00000) {
16112 * reduce the number to keep us under
16115 rack->r_ctl.ack_count /= 2;
16116 rack->r_ctl.sack_count /= 2;
16118 if (tp->t_flags & TF_NEEDSYN) {
16120 * T/TCP: Connection was half-synchronized, and our SYN has
16121 * been ACK'd (so connection is now fully synchronized). Go
16122 * to non-starred state, increment snd_una for ACK of SYN,
16123 * and check if we can do window scaling.
16125 tp->t_flags &= ~TF_NEEDSYN;
16127 acked_amount = acked = (high_seq - tp->snd_una);
16129 if (acked > sbavail(&so->so_snd))
16130 acked_amount = sbavail(&so->so_snd);
16131 #ifdef TCP_SAD_DETECTION
16133 * We only care about a cum-ack move if we are in a sack-disabled
16134 * state. We have already added in to the ack_count, and we never
16135 * would disable on a cum-ack move, so we only care to do the
16136 * detection if it may "undo" it, i.e. we were in disabled already.
16138 if (rack->sack_attack_disable)
16139 rack_do_detection(tp, rack, acked_amount, segsiz);
16141 if (IN_FASTRECOVERY(tp->t_flags) &&
16142 (rack->rack_no_prr == 0))
16143 rack_update_prr(tp, rack, acked_amount, high_seq);
16144 if (IN_RECOVERY(tp->t_flags)) {
16145 if (SEQ_LT(high_seq, tp->snd_recover) &&
16146 (SEQ_LT(high_seq, tp->snd_max))) {
16147 tcp_rack_partialack(tp);
16149 rack_post_recovery(tp, high_seq);
16153 /* Handle the rack-log-ack part (sendmap) */
16154 if ((sbused(&so->so_snd) == 0) &&
16155 (acked > acked_amount) &&
16156 (tp->t_state >= TCPS_FIN_WAIT_1) &&
16157 (tp->t_flags & TF_SENTFIN)) {
16159 * We must be sure our fin
16160 * was sent and acked (we can be
16161 * in FIN_WAIT_1 without having
16166 * Lets make sure snd_una is updated
16167 * since most likely acked_amount = 0 (it
16170 tp->snd_una = high_seq;
16172 /* Did we make an RTO error? */
16173 if ((tp->t_flags & TF_PREVVALID) &&
16174 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
16175 tp->t_flags &= ~TF_PREVVALID;
16176 if (tp->t_rxtshift == 1 &&
16177 (int)(ticks - tp->t_badrxtwin) < 0)
16178 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__);
16180 /* Handle the data in the socket buffer */
16181 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1);
16182 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
16183 if (acked_amount > 0) {
16184 struct mbuf *mfree;
16186 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery);
16187 SOCKBUF_LOCK(&so->so_snd);
16188 mfree = sbcut_locked(&so->so_snd, acked_amount);
16189 tp->snd_una = high_seq;
16190 /* Note we want to hold the sb lock through the sendmap adjust */
16191 rack_adjust_sendmap_head(rack, &so->so_snd);
16192 /* Wake up the socket if we have room to write more */
16193 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
16194 sowwakeup_locked(so);
16195 if ((recovery == 1) &&
16196 (rack->excess_rxt_on) &&
16197 (rack->r_cwnd_was_clamped == 0)) {
16198 do_rack_excess_rxt(tp, rack);
16199 } else if (rack->r_cwnd_was_clamped)
16200 do_rack_check_for_unclamp(tp, rack);
16203 /* update progress */
16204 tp->t_acktime = ticks;
16205 rack_log_progress_event(rack, tp, tp->t_acktime,
16206 PROGRESS_UPDATE, __LINE__);
16207 /* Clear out shifts and such */
16208 tp->t_rxtshift = 0;
16209 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
16210 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
16211 rack->rc_tlp_in_progress = 0;
16212 rack->r_ctl.rc_tlp_cnt_out = 0;
16213 /* snd_recover and snd_nxt must be dragged along */
16214 if (SEQ_GT(tp->snd_una, tp->snd_recover))
16215 tp->snd_recover = tp->snd_una;
16216 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
16217 tp->snd_nxt = tp->snd_una;
16219 * If the RXT timer is running we want to
16220 * stop it, so we can restart a TLP (or new RXT).
16222 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
16223 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
16224 tp->snd_wl2 = high_seq;
16226 if (under_pacing &&
16227 (rack->use_fixed_rate == 0) &&
16228 (rack->in_probe_rtt == 0) &&
16229 rack->rc_gp_dyn_mul &&
16230 rack->rc_always_pace) {
16231 /* Check if we are dragging bottom */
16232 rack_check_bottom_drag(tp, rack, so);
16234 if (tp->snd_una == tp->snd_max) {
16235 tp->t_flags &= ~TF_PREVVALID;
16236 rack->r_ctl.retran_during_recovery = 0;
16237 rack->rc_suspicious = 0;
16238 rack->r_ctl.dsack_byte_cnt = 0;
16239 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
16240 if (rack->r_ctl.rc_went_idle_time == 0)
16241 rack->r_ctl.rc_went_idle_time = 1;
16242 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
16243 if (sbavail(&tptosocket(tp)->so_snd) == 0)
16245 /* Set so we might enter persists... */
16246 rack->r_wanted_output = 1;
16247 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
16248 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
16249 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
16250 (sbavail(&so->so_snd) == 0) &&
16251 (tp->t_flags2 & TF2_DROP_AF_DATA)) {
16253 * The socket was gone and the
16254 * peer sent data (not now in the past), time to
16257 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
16258 /* tcp_close will kill the inp; pre-log the Reset */
16259 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
16260 #ifdef TCP_ACCOUNTING
16261 rdstc = get_cyclecount();
16262 if (rdstc > ts_val) {
16263 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16264 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
16265 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
16270 tp = tcp_close(tp);
16272 #ifdef TCP_ACCOUNTING
16278 * We would normally do drop-with-reset which would
16279 * send back a reset. We can't since we don't have
16280 * all the needed bits. Instead let's arrange for
16281 * a call to tcp_output(). That way since we
16282 * are in the closed state we will generate a reset.
16284 * Note if tcp_accounting is on we don't unpin since
16285 * we do that after the goto label.
16287 goto send_out_a_rst;
16289 if ((sbused(&so->so_snd) == 0) &&
16290 (tp->t_state >= TCPS_FIN_WAIT_1) &&
16291 (tp->t_flags & TF_SENTFIN)) {
16293 * If we can't receive any more data, then closing user can
16294 * proceed. Starting the timer is contrary to the
16295 * specification, but if we don't get a FIN we'll hang
16299 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
16300 soisdisconnected(so);
16301 tcp_timer_activate(tp, TT_2MSL,
16302 (tcp_fast_finwait2_recycle ?
16303 tcp_finwait2_timeout :
16306 if (ourfinisacked == 0) {
16308 * We don't change to fin-wait-2 if we have our fin acked
16309 * which means we are probably in TCPS_CLOSING.
16311 tcp_state_change(tp, TCPS_FIN_WAIT_2);
16315 /* Wake up the socket if we have room to write more */
16316 if (sbavail(&so->so_snd)) {
16317 rack->r_wanted_output = 1;
16318 if (ctf_progress_timeout_check(tp, true)) {
16319 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
16320 tp, tick, PROGRESS_DROP, __LINE__);
16322 * We cheat here and don't send a RST, we should send one
16323 * when the pacer drops the connection.
16325 #ifdef TCP_ACCOUNTING
16326 rdstc = get_cyclecount();
16327 if (rdstc > ts_val) {
16328 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16329 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
16330 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
16335 (void)tcp_drop(tp, ETIMEDOUT);
16340 if (ourfinisacked) {
16341 switch(tp->t_state) {
16343 #ifdef TCP_ACCOUNTING
16344 rdstc = get_cyclecount();
16345 if (rdstc > ts_val) {
16346 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16347 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
16348 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
16357 case TCPS_LAST_ACK:
16358 #ifdef TCP_ACCOUNTING
16359 rdstc = get_cyclecount();
16360 if (rdstc > ts_val) {
16361 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16362 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
16363 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
16368 tp = tcp_close(tp);
16369 ctf_do_drop(m, tp);
16372 case TCPS_FIN_WAIT_1:
16373 #ifdef TCP_ACCOUNTING
16374 rdstc = get_cyclecount();
16375 if (rdstc > ts_val) {
16376 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16377 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
16378 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
16382 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
16383 soisdisconnected(so);
16384 tcp_timer_activate(tp, TT_2MSL,
16385 (tcp_fast_finwait2_recycle ?
16386 tcp_finwait2_timeout :
16389 tcp_state_change(tp, TCPS_FIN_WAIT_2);
16395 if (rack->r_fast_output) {
16397 * We are doing fast output... can we expand that?
16399 rack_gain_for_fastoutput(rack, tp, so, acked_amount);
16401 #ifdef TCP_ACCOUNTING
16402 rdstc = get_cyclecount();
16403 if (rdstc > ts_val) {
16404 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16405 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
16406 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
16410 } else if (win_up_req) {
16411 rdstc = get_cyclecount();
16412 if (rdstc > ts_val) {
16413 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16414 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val);
16419 /* Now, is there a next packet? If so we are done */
16423 #ifdef TCP_ACCOUNTING
16426 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs);
16429 rack_handle_might_revert(tp, rack);
16430 ctf_calc_rwin(so, tp);
16431 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) {
16433 if (tcp_output(tp) < 0) {
16434 #ifdef TCP_ACCOUNTING
16441 if (rack->rc_inp->inp_hpts_calls)
16442 rack->rc_inp->inp_hpts_calls = 0;
16443 rack_free_trim(rack);
16444 #ifdef TCP_ACCOUNTING
16447 rack_timer_audit(tp, rack, &so->so_snd);
16448 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs);
16452 #define TCP_LRO_TS_OPTION \
16453 ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
16454 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
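/*
 * With TCPOPT_NOP == 1, TCPOPT_TIMESTAMP == 8 and TCPOLEN_TIMESTAMP == 10,
 * the constant above is ntohl(0x0101080a), i.e. the wire image
 * "NOP, NOP, kind=8, len=10". Comparing a single aligned 32-bit load of
 * the option area against this value is how we (and LRO) cheaply
 * recognize a segment that carries only the appended timestamp option.
 */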
16457 rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
16458 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, int32_t nxt_pkt,
16459 struct timeval *tv)
16461 struct inpcb *inp = tptoinpcb(tp);
16462 struct socket *so = tptosocket(tp);
16463 #ifdef TCP_ACCOUNTING
16466 int32_t thflags, retval, did_out = 0;
16467 int32_t way_out = 0;
16469 * cts - is the current time from tv (caller gets ts) in microseconds.
16470 * ms_cts - is the current time from tv in milliseconds.
16471 * us_cts - is the time that LRO or hardware actually got the packet in microseconds.
16473 uint32_t cts, us_cts, ms_cts;
16474 uint32_t tiwin, high_seq;
16475 struct timespec ts;
16477 struct tcp_rack *rack;
16478 struct rack_sendmap *rsm;
16479 int32_t prev_state = 0;
16481 int slot_remaining = 0;
16482 #ifdef TCP_ACCOUNTING
16483 int ack_val_set = 0xf;
16487 NET_EPOCH_ASSERT();
16488 INP_WLOCK_ASSERT(inp);
16491 * tv passed from common code is from either M_TSTMP_LRO or
16492 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present.
16494 rack = (struct tcp_rack *)tp->t_fb_ptr;
16495 if (rack->rack_deferred_inited == 0) {
16497 * If we are the connecting socket we will
16498 * hit rack_init() when no sequence numbers
16499 * are setup. This makes it so we must defer
16500 * some initialization. Call that now.
16502 rack_deferred_init(tp, rack);
16505 * Check to see if we need to skip any output plans. This
16506 * can happen in the non-LRO path where we are pacing and
16507 * must process the ack coming in but need to defer sending
16508 * anything because a pacing timer is running.
16510 us_cts = tcp_tv_to_usectick(tv);
16511 if (m->m_flags & M_ACKCMP) {
16513 * All compressed acks are acks by definition, so
16514 * remove any ack required flag and then do the processing.
16516 rack->rc_ack_required = 0;
16517 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv));
16519 thflags = tcp_get_flags(th);
16520 if ((rack->rc_always_pace == 1) &&
16521 (rack->rc_ack_can_sendout_data == 0) &&
16522 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
16523 (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) {
16525 * Ok, conditions are right for queuing the packets,
16526 * but we do have to check the flags in the inp; it
16527 * could be that, if a sack is present, we want to be awoken
16528 * and so should process the packets.
16530 slot_remaining = rack->r_ctl.rc_last_output_to - us_cts;
16531 if (rack->rc_inp->inp_flags2 & INP_DONT_SACK_QUEUE) {
16535 * If there are no options, or just a
16536 * timestamp option, we will want to queue
16537 * the packets. This is the same as what LRO does
16538 * and will need to change with accurate ECN.
16543 optlen = (th->th_off << 2) - sizeof(struct tcphdr);
16544 ts_ptr = (uint32_t *)(th + 1);
16545 if ((optlen == 0) ||
16546 ((optlen == TCPOLEN_TSTAMP_APPA) &&
16547 (*ts_ptr == TCP_LRO_TS_OPTION)))
16550 if ((no_output == 1) && (slot_remaining < tcp_min_hptsi_time)) {
16552 * It is unrealistic to think we can pace in less than
16553 * the minimum granularity of the pacer (def:250usec). So
16554 * if we have less than that time remaining we should go
16555 * ahead and allow output to be "early". We will attempt to
16556 * make up for it in any pacing time we try to apply on
16557 * the outbound packet.
16563 * If there is a RST or FIN, let's dump out the bw;
16564 * with a FIN the connection may go on but we
16567 if ((thflags & TH_FIN) || (thflags & TH_RST))
16568 rack_log_pacing_delay_calc(rack,
16572 rack_get_gp_est(rack), /* delRate */
16573 rack_get_lt_bw(rack), /* rttProp */
16574 20, __LINE__, NULL, 0);
16575 if (m->m_flags & M_ACKCMP) {
16576 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp);
16578 cts = tcp_tv_to_usectick(tv);
16579 ms_cts = tcp_tv_to_mssectick(tv);
16580 nsegs = m->m_pkthdr.lro_nsegs;
16581 counter_u64_add(rack_proc_non_comp_ack, 1);
16582 #ifdef TCP_ACCOUNTING
16584 if (thflags & TH_ACK)
16585 ts_val = get_cyclecount();
16587 if ((m->m_flags & M_TSTMP) ||
16588 (m->m_flags & M_TSTMP_LRO)) {
16589 mbuf_tstmp2timespec(m, &ts);
16590 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
16591 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
16593 rack->r_ctl.act_rcv_time = *tv;
16594 kern_prefetch(rack, &prev_state);
16597 * Unscale the window into a 32-bit value. For the SYN_SENT state
16598 * the scale is zero.
16600 tiwin = th->th_win << tp->snd_scale;
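/*
 * Example (hypothetical numbers): a received th_win of 1000 with a
 * negotiated snd_scale of 7 gives tiwin = 1000 << 7 = 128000 bytes.
 * In SYN_SENT snd_scale is still 0, so the window is used unscaled.
 */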
16601 #ifdef TCP_ACCOUNTING
16602 if (thflags & TH_ACK) {
16604 * We have a tradeoff here. We can either do what we are
16605 * doing i.e. pinning to this CPU and then doing the accounting
16606 * <or> we could do a critical enter, setup the rdtsc and cpu
16607 * as in below, and then validate we are on the same CPU on
16608 * exit. I have chosen to not do the critical enter since
16609 * that often will gain you a context switch, and instead lock
16610 * us (line above this if) to the same CPU with sched_pin(). This
16611 * means we may be context switched out for a higher priority
16612 * interrupt but we won't be moved to another CPU.
16614 * If this occurs (which it won't very often since we most likely
16615 * are running this code in interrupt context and only a higher
16616 * priority will bump us ... clock?) we will falsely add in
16617 * the interrupt processing time on top of the ack processing
16618 * time. This is ok since it's a rare event.
16620 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin,
16621 ctf_fixed_maxseg(tp));
16625 * Parse options on any incoming segment.
16627 memset(&to, 0, sizeof(to));
16628 tcp_dooptions(&to, (u_char *)(th + 1),
16629 (th->th_off << 2) - sizeof(struct tcphdr),
16630 (thflags & TH_SYN) ? TO_SYN : 0);
16631 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
16633 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
16636 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
16637 (tp->t_flags & TF_GPUTINPROG)) {
16639 * We have a goodput in progress
16640 * and we have entered a late state.
16641 * Do we have enough data in the sb
16642 * to handle the GPUT request?
16646 bytes = tp->gput_ack - tp->gput_seq;
16647 if (SEQ_GT(tp->gput_seq, tp->snd_una))
16648 bytes += tp->gput_seq - tp->snd_una;
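/*
 * Example (hypothetical numbers): if gput_seq = 1000, gput_ack = 51000
 * and snd_una = 900, then bytes = 50000 + 100 = 50100; unless at least
 * that much sits in the send buffer the measurement below is cancelled.
 */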
16649 if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
16651 * There are not enough bytes in the socket
16652 * buffer that have been sent to cover this
16653 * measurement. Cancel it.
16655 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
16656 rack->r_ctl.rc_gp_srtt /*flex1*/,
16658 0, 0, 18, __LINE__, NULL, 0);
16659 tp->t_flags &= ~TF_GPUTINPROG;
16662 high_seq = th->th_ack;
16663 if (tcp_bblogging_on(rack->rc_tp)) {
16664 union tcp_log_stackspecific log;
16665 struct timeval ltv;
16666 #ifdef TCP_REQUEST_TRK
16667 struct http_sendfile_track *http_req;
16669 if (SEQ_GT(th->th_ack, tp->snd_una)) {
16670 http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1));
16672 http_req = tcp_http_find_req_for_seq(tp, th->th_ack);
16675 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
16676 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
16677 if (rack->rack_no_prr == 0)
16678 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
16680 log.u_bbr.flex1 = 0;
16681 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
16682 log.u_bbr.use_lt_bw <<= 1;
16683 log.u_bbr.use_lt_bw |= rack->r_might_revert;
16684 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
16685 log.u_bbr.bbr_state = rack->rc_free_cnt;
16686 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
16687 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
16688 log.u_bbr.flex3 = m->m_flags;
16689 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
16690 log.u_bbr.lost = thflags;
16691 log.u_bbr.pacing_gain = 0x1;
16692 #ifdef TCP_ACCOUNTING
16693 log.u_bbr.cwnd_gain = ack_val_set;
16695 log.u_bbr.flex7 = 2;
16696 if (m->m_flags & M_TSTMP) {
16697 /* Record the hardware timestamp if present */
16698 mbuf_tstmp2timespec(m, &ts);
16699 ltv.tv_sec = ts.tv_sec;
16700 ltv.tv_usec = ts.tv_nsec / 1000;
16701 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v);
16702 } else if (m->m_flags & M_TSTMP_LRO) {
16703 /* Record the LRO the arrival timestamp */
16704 mbuf_tstmp2timespec(m, &ts);
16705 ltv.tv_sec = ts.tv_sec;
16706 ltv.tv_usec = ts.tv_nsec / 1000;
16707 log.u_bbr.flex5 = tcp_tv_to_usectick(<v);
16709 log.u_bbr.timeStamp = tcp_get_usecs(<v);
16710 /* Log the rcv time */
16711 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
16712 #ifdef TCP_REQUEST_TRK
16713 log.u_bbr.applimited = tp->t_http_closed;
16714 log.u_bbr.applimited <<= 8;
16715 log.u_bbr.applimited |= tp->t_http_open;
16716 log.u_bbr.applimited <<= 8;
16717 log.u_bbr.applimited |= tp->t_http_req;
16719 /* Copy out any client req info */
16721 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
16723 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
16724 log.u_bbr.rttProp = http_req->timestamp;
16725 log.u_bbr.cur_del_rate = http_req->start;
16726 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
16727 log.u_bbr.flex8 |= 1;
16729 log.u_bbr.flex8 |= 2;
16730 log.u_bbr.bw_inuse = http_req->end;
16732 log.u_bbr.flex6 = http_req->start_seq;
16733 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
16734 log.u_bbr.flex8 |= 4;
16735 log.u_bbr.epoch = http_req->end_seq;
16739 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
16740 tlen, &log, true, <v);
16742 /* Remove ack required flag if set, we have one */
16743 if (thflags & TH_ACK)
16744 rack->rc_ack_required = 0;
16745 if (rack->sack_attack_disable > 0) {
16746 rack->r_ctl.ack_during_sd++;
16747 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__);
16749 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
16753 goto done_with_input;
16756 * If a segment with the ACK-bit set arrives in the SYN-SENT state
16757 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
16759 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
16760 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
16761 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
16762 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
16763 #ifdef TCP_ACCOUNTING
16769 * If timestamps were negotiated during SYN/ACK and a
16770 * segment without a timestamp is received, silently drop
16771 * the segment, unless it is a RST segment or missing timestamps are
16773 * See section 3.2 of RFC 7323.
16775 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) &&
16776 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) {
16780 goto done_with_input;
16784 * Segment received on connection. Reset idle time and keep-alive
16785 * timer. XXX: This should be done after segment validation to
16786 * ignore broken/spoofed segs.
16788 if (tp->t_idle_reduce &&
16789 (tp->snd_max == tp->snd_una) &&
16790 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
16791 counter_u64_add(rack_input_idle_reduces, 1);
16792 rack_cc_after_idle(rack, tp);
16794 tp->t_rcvtime = ticks;
16796 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
16798 if (tiwin > rack->r_ctl.rc_high_rwnd)
16799 rack->r_ctl.rc_high_rwnd = tiwin;
16801 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move
16802 * this to occur after we've validated the segment.
16804 if (tcp_ecn_input_segment(tp, thflags, tlen,
16805 tcp_packets_this_ack(tp, th->th_ack),
16807 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__);
16810 * If echoed timestamp is later than the current time, fall back to
16811 * non RFC1323 RTT calculation. Normalize timestamp if syncookies
16812 * were used when this connection was established.
16814 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
16815 to.to_tsecr -= tp->ts_offset;
16816 if (TSTMP_GT(to.to_tsecr, ms_cts))
16821 * If it's the first time in, we need to take care of options and
16822 * verify we can do SACK for rack!
16824 if (rack->r_state == 0) {
16825 /* Should be init'd by rack_init() */
16826 KASSERT(rack->rc_inp != NULL,
16827 ("%s: rack->rc_inp unexpectedly NULL", __func__));
16828 if (rack->rc_inp == NULL) {
16829 rack->rc_inp = inp;
16833 * Process options only when we get SYN/ACK back. The SYN
16834 * case for incoming connections is handled in tcp_syncache.
16835 * According to RFC1323 the window field in a SYN (i.e., a
16836 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX
16837 * this is traditional behavior, may need to be cleaned up.
16839 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
16840 /* Handle parallel SYN for ECN */
16841 tcp_ecn_input_parallel_syn(tp, thflags, iptos);
16842 if ((to.to_flags & TOF_SCALE) &&
16843 (tp->t_flags & TF_REQ_SCALE)) {
16844 tp->t_flags |= TF_RCVD_SCALE;
16845 tp->snd_scale = to.to_wscale;
16847 tp->t_flags &= ~TF_REQ_SCALE;
16849 * Initial send window. It will be updated with the
16850 * next incoming segment to the scaled value.
16852 tp->snd_wnd = th->th_win;
16853 rack_validate_fo_sendwin_up(tp, rack);
16854 if ((to.to_flags & TOF_TS) &&
16855 (tp->t_flags & TF_REQ_TSTMP)) {
16856 tp->t_flags |= TF_RCVD_TSTMP;
16857 tp->ts_recent = to.to_tsval;
16858 tp->ts_recent_age = cts;
16860 tp->t_flags &= ~TF_REQ_TSTMP;
16861 if (to.to_flags & TOF_MSS) {
16862 tcp_mss(tp, to.to_mss);
16864 if ((tp->t_flags & TF_SACK_PERMIT) &&
16865 (to.to_flags & TOF_SACKPERM) == 0)
16866 tp->t_flags &= ~TF_SACK_PERMIT;
16867 if (IS_FASTOPEN(tp->t_flags)) {
16868 if (to.to_flags & TOF_FASTOPEN) {
16871 if (to.to_flags & TOF_MSS)
16874 if ((inp->inp_vflag & INP_IPV6) != 0)
16878 tcp_fastopen_update_cache(tp, mss,
16879 to.to_tfo_len, to.to_tfo_cookie);
16881 tcp_fastopen_disable_path(tp);
16885 * At this point we are at the initial call. Here we decide
16886 * if we are doing RACK or not. We do this by seeing if
16887 * TF_SACK_PERMIT is set and the sack-not-required is clear.
16888 * The code now does do dup-ack counting so if you don't
16889 * switch back you won't get rack & TLP, but you will still
16893 if ((rack_sack_not_required == 0) &&
16894 ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
16895 tcp_switch_back_to_default(tp);
16896 (*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen,
16898 #ifdef TCP_ACCOUNTING
16904 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack);
16906 if (thflags & TH_FIN)
16907 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN);
16908 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
16909 if ((rack->rc_gp_dyn_mul) &&
16910 (rack->use_fixed_rate == 0) &&
16911 (rack->rc_always_pace)) {
16912 /* Check in on probertt */
16913 rack_check_probe_rtt(rack, us_cts);
16915 rack_clear_rate_sample(rack);
16916 if ((rack->forced_ack) &&
16917 ((tcp_get_flags(th) & TH_RST) == 0)) {
16918 rack_handle_probe_response(rack, tiwin, us_cts);
16921 * This is the one exception case where we set the rack state
16922 * always. All other times (timers etc) we must have a rack-state
16923 * set (so we assure we have done the checks above for SACK).
16925 rack->r_ctl.rc_rcvtime = cts;
16926 if (rack->r_state != tp->t_state)
16927 rack_set_state(tp, rack);
16928 if (SEQ_GT(th->th_ack, tp->snd_una) &&
16929 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL)
16930 kern_prefetch(rsm, &prev_state);
16931 prev_state = rack->r_state;
16932 if ((thflags & TH_RST) &&
16933 ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
16934 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
16935 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) {
16936 /* The connection will be killed by a reset check the tracepoint */
16937 tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV);
16939 retval = (*rack->r_substate) (m, th, so,
16940 tp, &to, drop_hdrlen,
16941 tlen, tiwin, thflags, nxt_pkt, iptos);
16944 * If retval is 1 the tcb is unlocked and most likely the tp
16947 INP_WLOCK_ASSERT(inp);
16948 if ((rack->rc_gp_dyn_mul) &&
16949 (rack->rc_always_pace) &&
16950 (rack->use_fixed_rate == 0) &&
16951 rack->in_probe_rtt &&
16952 (rack->r_ctl.rc_time_probertt_starts == 0)) {
16954 * If we are going for target, lets recheck before
16957 rack_check_probe_rtt(rack, us_cts);
16959 if (rack->set_pacing_done_a_iw == 0) {
16960 /* How much has been acked? */
16961 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) {
16962 /* We have enough to set in the pacing segment size */
16963 rack->set_pacing_done_a_iw = 1;
16964 rack_set_pace_segments(tp, rack, __LINE__, NULL);
16967 tcp_rack_xmit_timer_commit(rack, tp);
16968 #ifdef TCP_ACCOUNTING
16970 * If we set ack_val_set to what ack processing we are doing
16971 * we also want to track how many cycles we burned. Note
16972 * the bits after tcp_output we let be "free". This is because
16973 * we are also tracking the tcp_output times as well. Note the
16974 * use of 0xf here since we only have 11 counters (0 - 0xa) and
16975 * 0xf cannot be returned and is what we initialize it to, to
16976 * indicate we are not doing the tabulations.
16978 if (ack_val_set != 0xf) {
16981 crtsc = get_cyclecount();
16982 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16983 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val);
16987 if ((nxt_pkt == 0) && (no_output == 0)) {
16988 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) {
16990 if (tcp_output(tp) < 0) {
16991 #ifdef TCP_ACCOUNTING
16998 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
16999 rack_free_trim(rack);
17000 } else if ((no_output == 1) &&
17002 (tcp_in_hpts(rack->rc_inp) == 0)) {
17004 * We are not in hpts and we had a pacing timer up. Use
17005 * the remaining time (slot_remaining) to restart the timer.
17007 KASSERT ((slot_remaining != 0), ("slot remaining is zero for rack:%p tp:%p", rack, tp));
17008 rack_start_hpts_timer(rack, tp, cts, slot_remaining, 0, 0);
17009 rack_free_trim(rack);
17011 /* Clear the flag, it may have been cleared by output but we may not have */
17012 if ((nxt_pkt == 0) && (inp->inp_hpts_calls))
17013 inp->inp_hpts_calls = 0;
17014 /* Update any rounds needed */
17015 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp))
17016 rack_log_hystart_event(rack, high_seq, 8);
17018 * The draft (v3) calls for us to use SEQ_GEQ, but that
17019 * causes issues when we are just going app limited. Lets
17020 * instead use SEQ_GT <or> where it's equal but more data
17023 * Also make sure we are on the last ack of a series. We
17024 * have to have all the ack's processed in queue to know
17025 * if there is something left outstanding.
17027 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) &&
17028 (rack->rc_new_rnd_needed == 0) &&
17030 rack_log_hystart_event(rack, tp->snd_una, 21);
17031 rack->r_ctl.current_round++;
17032 /* Force the next send to setup the next round */
17033 rack->rc_new_rnd_needed = 1;
17034 if (CC_ALGO(tp)->newround != NULL) {
17035 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round);
17038 if ((nxt_pkt == 0) &&
17039 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
17040 (SEQ_GT(tp->snd_max, tp->snd_una) ||
17041 (tp->t_flags & TF_DELACK) ||
17042 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
17043 (tp->t_state <= TCPS_CLOSING)))) {
17044 /* We could not send (probably in the hpts but stopped the timer earlier)? */
17045 if ((tp->snd_max == tp->snd_una) &&
17046 ((tp->t_flags & TF_DELACK) == 0) &&
17047 (tcp_in_hpts(rack->rc_inp)) &&
17048 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
17049 /* keep alive not needed if we are hptsi output yet */
17053 if (tcp_in_hpts(inp)) {
17054 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
17055 us_cts = tcp_get_usecs(NULL);
17056 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
17058 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
17061 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
17063 tcp_hpts_remove(inp);
17065 if (late && (did_out == 0)) {
17067 * We are late in the sending
17068 * and we did not call the output
17069 * (this probably should not happen).
17071 goto do_output_now;
17073 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
17076 } else if (nxt_pkt == 0) {
17077 /* Do we have the correct timer running? */
17078 rack_timer_audit(tp, rack, &so->so_snd);
17082 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs));
17084 rack->r_wanted_output = 0;
17086 #ifdef TCP_ACCOUNTING
17093 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
17094 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos)
17098 /* First lets see if we have old packets */
17099 if (!STAILQ_EMPTY(&tp->t_inqueue)) {
17100 if (ctf_do_queued_segments(tp, 1)) {
17105 if (m->m_flags & M_TSTMP_LRO) {
17106 mbuf_tstmp2timeval(m, &tv);
17108 /* Should not be; should we kassert instead? */
17109 tcp_get_usecs(&tv);
17111 if (rack_do_segment_nounlock(tp, m, th, drop_hdrlen, tlen, iptos, 0,
17113 INP_WUNLOCK(tptoinpcb(tp));
17117 struct rack_sendmap *
17118 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused)
17120 struct rack_sendmap *rsm = NULL;
17122 uint32_t srtt = 0, thresh = 0, ts_low = 0;
17125 /* Return the next guy to be re-transmitted */
17126 if (tqhash_empty(rack->r_ctl.tqh)) {
17129 if (tp->t_flags & TF_SENTFIN) {
17130 /* retran the end FIN? */
17133 /* ok lets look at this one */
17134 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
17135 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) {
17138 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) {
17141 rsm = rack_find_lowest_rsm(rack);
17146 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) ||
17147 (rack->sack_attack_disable > 0)) {
17150 if ((no_sack > 0) &&
17151 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
17153 * No sack so we automatically do the 3 strikes and
17154 * retransmit (no rack timer would be started).
17158 if (rsm->r_flags & RACK_ACKED) {
17161 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) &&
17162 (rsm->r_dupack < DUP_ACK_THRESHOLD)) {
17163 /* Its not yet ready */
17166 srtt = rack_grab_rtt(tp, rack);
17167 idx = rsm->r_rtr_cnt - 1;
17168 ts_low = (uint32_t)rsm->r_tim_lastsent[idx];
17169 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
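/*
 * A candidate is only retransmit-eligible once (tsused - ts_low) has
 * reached thresh, where thresh is roughly the current RTT estimate plus
 * a reordering allowance computed by rack_calc_thresh_rack(). The checks
 * below implement that "not long enough yet" gate before falling back to
 * the dup-ack / SACK-passed test.
 */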
17170 if ((tsused == ts_low) ||
17171 (TSTMP_LT(tsused, ts_low))) {
17172 /* No time since sending */
17175 if ((tsused - ts_low) < thresh) {
17176 /* It has not been long enough yet */
17179 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
17180 ((rsm->r_flags & RACK_SACK_PASSED) &&
17181 (rack->sack_attack_disable == 0))) {
17183 * We have passed the dup-ack threshold <or>
17184 * a SACK has indicated this is missing.
17185 * Note that if you are a declared attacker
17186 * it is only the dup-ack threshold that
17187 * will cause retransmits.
17189 /* log retransmit reason */
17190 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1);
17191 rack->r_fast_output = 0;
17198 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
17199 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method,
17200 int line, struct rack_sendmap *rsm, uint8_t quality)
17202 if (tcp_bblogging_on(rack->rc_tp)) {
17203 union tcp_log_stackspecific log;
17206 memset(&log, 0, sizeof(log));
17207 log.u_bbr.flex1 = slot;
17208 log.u_bbr.flex2 = len;
17209 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs;
17210 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs;
17211 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss;
17212 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca;
17213 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data;
17214 log.u_bbr.use_lt_bw <<= 1;
17215 log.u_bbr.use_lt_bw |= rack->r_late;
17216 log.u_bbr.use_lt_bw <<= 1;
17217 log.u_bbr.use_lt_bw |= rack->r_early;
17218 log.u_bbr.use_lt_bw <<= 1;
17219 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
17220 log.u_bbr.use_lt_bw <<= 1;
17221 log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
17222 log.u_bbr.use_lt_bw <<= 1;
17223 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
17224 log.u_bbr.use_lt_bw <<= 1;
17225 log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
17226 log.u_bbr.use_lt_bw <<= 1;
17227 log.u_bbr.use_lt_bw |= rack->gp_ready;
17228 log.u_bbr.pkt_epoch = line;
17229 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed;
17230 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early;
17231 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec;
17232 log.u_bbr.bw_inuse = bw_est;
17233 log.u_bbr.delRate = bw;
17234 if (rack->r_ctl.gp_bw == 0)
17235 log.u_bbr.cur_del_rate = 0;
17237 log.u_bbr.cur_del_rate = rack_get_bw(rack);
17238 log.u_bbr.rttProp = len_time;
17239 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt;
17240 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit;
17241 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
17242 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) {
17243 /* We are in slow start */
17244 log.u_bbr.flex7 = 1;
17246 /* we are on congestion avoidance */
17247 log.u_bbr.flex7 = 0;
17249 log.u_bbr.flex8 = method;
17250 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
17251 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
17252 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec;
17253 log.u_bbr.cwnd_gain <<= 1;
17254 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
17255 log.u_bbr.cwnd_gain <<= 1;
17256 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
17257 log.u_bbr.bbr_substate = quality;
17258 log.u_bbr.bbr_state = rack->dgp_on;
17259 log.u_bbr.bbr_state <<= 1;
17260 log.u_bbr.bbr_state |= rack->r_fill_less_agg;
17261 log.u_bbr.bbr_state <<= 1;
17262 log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd;
17263 log.u_bbr.bbr_state <<= 2;
17264 log.u_bbr.bbr_state |= rack->r_pacing_discount;
17265 log.u_bbr.flex7 = ((rack->r_ctl.pacing_discount_amm << 1) | log.u_bbr.flex7);
17266 TCP_LOG_EVENTP(rack->rc_tp, NULL,
17267 &rack->rc_inp->inp_socket->so_rcv,
17268 &rack->rc_inp->inp_socket->so_snd,
17269 BBR_LOG_HPTSI_CALC, 0,
17270 0, &log, false, &tv);
17275 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss)
17277 uint32_t new_tso, user_max, pace_one;
17279 user_max = rack->rc_user_set_max_segs * mss;
17280 if (rack->rc_force_max_seg) {
17283 if (rack->use_fixed_rate &&
17284 ((rack->r_ctl.crte == NULL) ||
17285 (bw != rack->r_ctl.crte->rate))) {
17286 /* Use the user mss since we are not exactly matched */
17289 if (rack_pace_one_seg ||
17290 (rack->r_ctl.rc_user_set_min_segs == 1))
17295 new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss,
17296 pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor);
17297 if (new_tso > user_max)
17298 new_tso = user_max;
17299 if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) {
17300 if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso)
17301 new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss;
17303 if (rack->r_ctl.rc_user_set_min_segs &&
17304 ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso))
17305 new_tso = rack->r_ctl.rc_user_set_min_segs * mss;
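/*
 * Net effect of the clamps above: the bandwidth-derived burst size is
 * first capped at the user maximum, then raised to the hybrid client
 * suggestion and finally to the user minimum, so an explicit floor
 * always wins over the computed TSO size.
 */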
17310 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced)
17312 uint64_t lentim, fill_bw;
17314 /* Lets first see if we are full, if so continue with normal rate */
17315 rack->r_via_fill_cw = 0;
17316 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use)
17318 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd)
17320 if (rack->r_ctl.rc_last_us_rtt == 0)
17322 if (rack->rc_pace_fill_if_rttin_range &&
17323 (rack->r_ctl.rc_last_us_rtt >=
17324 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) {
17325 /* The rtt is huge, N * smallest, lets not fill */
17329 * First let's calculate the b/w based on the last us-rtt
17330 * and the smallest send window.
17332 fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use);
17333 /* Take the rwnd if its smaller */
17334 if (fill_bw > rack->rc_tp->snd_wnd)
17335 fill_bw = rack->rc_tp->snd_wnd;
17336 /* Now lets make it into a b/w */
17337 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC;
17338 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt;
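/*
 * Example (hypothetical numbers): a 100000 byte window with a last
 * measured RTT of 20000 usec gives
 * fill_bw = 100000 * 1000000 / 20000 = 5,000,000 bytes/sec --
 * the rate that would drain one full window per RTT.
 */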
17339 if (rack->r_fill_less_agg) {
17341 * We want the average of the rate_wanted
17342 * and our fill-cw calculated bw. We also want
17343 * to cap any increase to be no more than
17344 * X times the lt_bw (where X is the rack_bw_multipler).
17346 uint64_t lt_bw, rate;
17348 lt_bw = rack_get_lt_bw(rack);
17349 if (lt_bw > *rate_wanted)
17352 rate = *rate_wanted;
17355 if (rack_bw_multipler && (fill_bw > (rate * rack_bw_multipler))) {
17356 fill_bw = rate * rack_bw_multipler;
17359 /* We are below the min b/w */
17361 *rate_wanted = fill_bw;
17362 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted))
17364 rack->r_via_fill_cw = 1;
17365 if (rack->r_rack_hw_rate_caps &&
17366 (rack->r_ctl.crte != NULL)) {
17367 uint64_t high_rate;
17369 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte);
17370 if (fill_bw > high_rate) {
17371 /* We are capping bw at the highest rate table entry */
17372 if (*rate_wanted > high_rate) {
17373 /* The original rate was also capped */
17374 rack->r_via_fill_cw = 0;
17376 rack_log_hdwr_pacing(rack,
17377 fill_bw, high_rate, __LINE__,
17379 fill_bw = high_rate;
17383 } else if ((rack->r_ctl.crte == NULL) &&
17384 (rack->rack_hdrw_pacing == 0) &&
17385 (rack->rack_hdw_pace_ena) &&
17386 rack->r_rack_hw_rate_caps &&
17387 (rack->rack_attempt_hdwr_pace == 0) &&
17388 (rack->rc_inp->inp_route.ro_nh != NULL) &&
17389 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
17391 * Ok we may have a first attempt that is greater than our top rate
17394 uint64_t high_rate;
17396 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp);
17398 if (fill_bw > high_rate) {
17399 fill_bw = high_rate;
17405 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) {
17406 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
17407 fill_bw, 0, 0, HYBRID_LOG_RATE_CAP, 2, NULL);
17408 fill_bw = rack->r_ctl.bw_rate_cap;
17411 * Ok fill_bw holds our mythical b/w to fill the cwnd
17412 * in an rtt (unless it was capped), what does that
17413 * time wise equate too?
17415 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC;
17417 *rate_wanted = fill_bw;
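/*
 * Example (hypothetical numbers): once lentim is divided by fill_bw it
 * is the time needed to send len bytes at that rate, e.g. len = 50000
 * bytes at fill_bw = 5,000,000 bytes/sec gives
 * 50000 * 1000000 / 5000000 = 10000 usec.
 */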
17418 if (non_paced || (lentim < slot)) {
17419 rack_log_pacing_delay_calc(rack, len, slot, fill_bw,
17420 0, lentim, 12, __LINE__, NULL, 0);
17421 return ((int32_t)lentim);
17427 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz)
17431 int32_t minslot = 0;
17432 int can_start_hw_pacing = 1;
17436 if (rack_pace_one_seg ||
17437 (rack->r_ctl.rc_user_set_min_segs == 1))
17441 if (rack->rc_always_pace == 0) {
17443 * We use the most optimistic possible cwnd/srtt for
17444 * sending calculations. This will make our
17445 * calculation anticipate getting more through
17446 * quicker than possible. But that's ok, we don't want
17447 * the peer to have a gap in data sending.
17449 uint64_t cwnd, tr_perms = 0;
17450 int32_t reduce = 0;
17454 * We keep no precise pacing with the old method
17455 * instead we use the pacer to mitigate bursts.
17457 if (rack->r_ctl.rc_rack_min_rtt)
17458 srtt = rack->r_ctl.rc_rack_min_rtt;
17460 srtt = max(tp->t_srtt, 1);
17461 if (rack->r_ctl.rc_rack_largest_cwnd)
17462 cwnd = rack->r_ctl.rc_rack_largest_cwnd;
17464 cwnd = rack->r_ctl.cwnd_to_use;
17465 /* Inflate cwnd by 1000 so srtt of usecs is in ms */
17466 tr_perms = (cwnd * 1000) / srtt;
17467 if (tr_perms == 0) {
17468 tr_perms = ctf_fixed_maxseg(tp);
17471 * Calculate how long this will take to drain, if
17472 * the calculation comes out to zero, that's ok; we
17473 * will use send_a_lot to possibly spin around for
17474 * more, increasing tot_len_this_send to the point
17475 * that it's going to require a pace, or we hit the
17476 * cwnd. Which in that case we are just waiting for
17479 slot = len / tr_perms;
17480 /* Now do we reduce the time so we don't run dry? */
17481 if (slot && rack_slot_reduction) {
17482 reduce = (slot / rack_slot_reduction);
17483 if (reduce < slot) {
17488 slot *= HPTS_USEC_IN_MSEC;
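/*
 * Example (hypothetical numbers, ignoring the rack_slot_reduction trim
 * above): cwnd = 64000 bytes and srtt = 20000 usec give
 * tr_perms = 64000 * 1000 / 20000 = 3200 bytes per msec; a 16000 byte
 * send then yields slot = 5 msec, converted here to 5000 usec.
 */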
17489 if (rack->rc_pace_to_cwnd) {
17490 uint64_t rate_wanted = 0;
17492 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1);
17493 rack->rc_ack_can_sendout_data = 1;
17494 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0);
17496 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0);
17497 /*******************************************************/
17498 /* RRS: We insert non-paced call to stats here for len */
17499 /*******************************************************/
17501 uint64_t bw_est, res, lentim, rate_wanted;
17506 if ((rack->r_rr_config == 1) && rsm) {
17507 return (rack->r_ctl.rc_min_to);
17509 if (rack->use_fixed_rate) {
17510 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack);
17511 } else if ((rack->r_ctl.init_rate == 0) &&
17512 (rack->r_ctl.gp_bw == 0)) {
17513 /* no way to yet do an estimate */
17514 bw_est = rate_wanted = 0;
17516 bw_est = rack_get_bw(rack);
17517 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped);
17519 if ((bw_est == 0) || (rate_wanted == 0) ||
17520 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) {
17522 * No way yet to make a b/w estimate or
17523 * our rate is set incorrectly.
17527 rack_rate_cap_bw(rack, &rate_wanted, &capped);
17528 /* We need to account for all the overheads */
17529 segs = (len + segsiz - 1) / segsiz;
17531 * We need the diff between 1514 bytes (e-mtu with e-hdr)
17532 * and how much data we put in each packet. Yes this
17533 * means we may be off if we are larger than 1500 bytes
17534 * or smaller. But this just makes us more conservative.
17537 oh = (tp->t_maxseg - segsiz) + sizeof(struct tcphdr);
17538 if (rack->r_is_v6) {
17540 oh += sizeof(struct ip6_hdr);
17544 oh += sizeof(struct ip);
17547 /* We add a fixed 14 for the ethernet header */
17550 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC;
17551 res = lentim / rate_wanted;
17552 slot = (uint32_t)res;
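/*
 * Example (hypothetical numbers): sending len = 14600 bytes as 10
 * segments with ~54 bytes of header overhead each is about 15140 wire
 * bytes; at rate_wanted = 12,500,000 bytes/sec (100 Mbps) that is
 * 15140 * 1000000 / 12500000 ~= 1211 usec of pacing delay.
 */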
17553 if (rack_hw_rate_min &&
17554 (rate_wanted < rack_hw_rate_min)) {
17555 can_start_hw_pacing = 0;
17556 if (rack->r_ctl.crte) {
17558 * Ok we need to release it, we
17559 * have fallen too low.
17561 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
17562 rack->r_ctl.crte = NULL;
17563 rack->rack_attempt_hdwr_pace = 0;
17564 rack->rack_hdrw_pacing = 0;
17567 if (rack->r_ctl.crte &&
17568 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) {
17570 * We want more than the hardware can give us,
17571 * don't start any hw pacing.
17573 can_start_hw_pacing = 0;
17574 if (rack->r_rack_hw_rate_caps == 0) {
17576 * Ok we need to release it, we
17577 * want more than the card can give us and
17578 * no rate cap is in place. Set it up so
17579 * when we want less we can retry.
17581 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
17582 rack->r_ctl.crte = NULL;
17583 rack->rack_attempt_hdwr_pace = 0;
17584 rack->rack_hdrw_pacing = 0;
17587 if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) {
17589 * We lost our rate somehow, this can happen
17590 * if the interface changed underneath us.
17592 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
17593 rack->r_ctl.crte = NULL;
17594 /* Lets re-allow attempting to setup pacing */
17595 rack->rack_hdrw_pacing = 0;
17596 rack->rack_attempt_hdwr_pace = 0;
17597 rack_log_hdwr_pacing(rack,
17598 rate_wanted, bw_est, __LINE__,
17601 prev_fill = rack->r_via_fill_cw;
17602 if ((rack->rc_pace_to_cwnd) &&
17604 (rack->use_fixed_rate == 0) &&
17605 (rack->in_probe_rtt == 0) &&
17606 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) {
17608 * We want to pace at our rate *or* faster to
17609 * fill the cwnd to the max if its not full.
17611 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0);
17612 /* Re-check to make sure we are not exceeding our max b/w */
17613 if ((rack->r_ctl.crte != NULL) &&
17614 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) {
17616 * We want more than the hardware can give us,
17617 * don't start any hw pacing.
17619 can_start_hw_pacing = 0;
17620 if (rack->r_rack_hw_rate_caps == 0) {
17622 * Ok we need to release it, we
17623 * want more than the card can give us and
17624 * no rate cap is in place. Set it up so
17625 * when we want less we can retry.
17627 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
17628 rack->r_ctl.crte = NULL;
17629 rack->rack_attempt_hdwr_pace = 0;
17630 rack->rack_hdrw_pacing = 0;
17631 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
17635 if ((rack->rc_inp->inp_route.ro_nh != NULL) &&
17636 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
17637 if ((rack->rack_hdw_pace_ena) &&
17638 (can_start_hw_pacing > 0) &&
17639 (rack->rack_hdrw_pacing == 0) &&
17640 (rack->rack_attempt_hdwr_pace == 0)) {
17642 * Lets attempt to turn on hardware pacing
17645 rack->rack_attempt_hdwr_pace = 1;
17646 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp,
17647 rack->rc_inp->inp_route.ro_nh->nh_ifp,
17650 &err, &rack->r_ctl.crte_prev_rate);
17651 if (rack->r_ctl.crte) {
17652 rack->rack_hdrw_pacing = 1;
17653 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz,
17654 pace_one, rack->r_ctl.crte,
17655 NULL, rack->r_ctl.pace_len_divisor);
17656 rack_log_hdwr_pacing(rack,
17657 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
17659 rack->r_ctl.last_hw_bw_req = rate_wanted;
17661 counter_u64_add(rack_hw_pace_init_fail, 1);
17663 } else if (rack->rack_hdrw_pacing &&
17664 (rack->r_ctl.last_hw_bw_req != rate_wanted)) {
17665 /* Do we need to adjust our rate? */
17666 const struct tcp_hwrate_limit_table *nrte;
17668 if (rack->r_up_only &&
17669 (rate_wanted < rack->r_ctl.crte->rate)) {
17671 * We have four possible states here
17672 * having to do with the previous time
17674 * previous | this-time
17675 * A) 0 | 0 -- fill_cw not in the picture
17676 * B) 1 | 0 -- we were doing a fill-cw but now are not
17677 * C) 1 | 1 -- all rates from fill_cw
17678 * D) 0 | 1 -- we were doing non-fill and now we are filling
17680 * For case A, C and D we don't allow a drop. But for
17681 * case B where we are now on our steady rate we do
17685 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0)))
17688 if ((rate_wanted > rack->r_ctl.crte->rate) ||
17689 (rate_wanted <= rack->r_ctl.crte_prev_rate)) {
17690 if (rack_hw_rate_to_low &&
17691 (bw_est < rack_hw_rate_to_low)) {
17693 * The pacing rate is too low for hardware, but
17694 * do allow hardware pacing to be restarted.
17696 rack_log_hdwr_pacing(rack,
17697 bw_est, rack->r_ctl.crte->rate, __LINE__,
17699 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
17700 rack->r_ctl.crte = NULL;
17701 rack->rack_attempt_hdwr_pace = 0;
17702 rack->rack_hdrw_pacing = 0;
17703 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
17706 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte,
17708 rack->rc_inp->inp_route.ro_nh->nh_ifp,
17711 &err, &rack->r_ctl.crte_prev_rate);
17712 if (nrte == NULL) {
17714 * Lost the rate, lets drop hardware pacing
17717 rack->rack_hdrw_pacing = 0;
17718 rack->r_ctl.crte = NULL;
17719 rack_log_hdwr_pacing(rack,
17720 rate_wanted, 0, __LINE__,
17722 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
17723 counter_u64_add(rack_hw_pace_lost, 1);
17724 } else if (nrte != rack->r_ctl.crte) {
17725 rack->r_ctl.crte = nrte;
17726 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted,
17727 segsiz, pace_one, rack->r_ctl.crte,
17728 NULL, rack->r_ctl.pace_len_divisor);
17729 rack_log_hdwr_pacing(rack,
17730 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
17732 rack->r_ctl.last_hw_bw_req = rate_wanted;
17735 /* We just need to adjust the segment size */
17736 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
17737 rack_log_hdwr_pacing(rack,
17738 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
17740 rack->r_ctl.last_hw_bw_req = rate_wanted;
17744 if (minslot && (minslot > slot)) {
17745 rack_log_pacing_delay_calc(rack, minslot, slot, rack->r_ctl.crte->rate, bw_est, lentim,
17746 98, __LINE__, NULL, 0);
17750 if (rack_limit_time_with_srtt &&
17751 (rack->use_fixed_rate == 0) &&
17752 (rack->rack_hdrw_pacing == 0)) {
17754 * Sanity check, we do not allow the pacing delay
17755 * to be longer than the SRTT of the path. If it is
17756 * a slow path, then adding a packet should increase
17757 * the RTT and compensate for this i.e. the srtt will
17758 * be greater so the allowed pacing time will be greater.
17760 * Note this restriction is not for where a peak rate
17761 * is set, we are doing fixed pacing or hardware pacing.
17763 if (rack->rc_tp->t_srtt)
17764 srtt = rack->rc_tp->t_srtt;
17766 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */
17767 if (srtt < (uint64_t)slot) {
17768 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0);
17772 /*******************************************************************/
17773 /* RRS: We insert paced call to stats here for len and rate_wanted */
17774 /*******************************************************************/
17775 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0);
17777 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) {
17779 * If this rate is seeing enobufs when it
17780 * goes to send then either the nic is out
17781 * of gas or we are mis-estimating the time
17782 * somehow and not letting the queue empty
17783 * completely. Lets add to the pacing time.
17785 int hw_boost_delay;
17787 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult;
17788 if (hw_boost_delay > rack_enobuf_hw_max)
17789 hw_boost_delay = rack_enobuf_hw_max;
17790 else if (hw_boost_delay < rack_enobuf_hw_min)
17791 hw_boost_delay = rack_enobuf_hw_min;
17792 slot += hw_boost_delay;
17798 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack,
17799 tcp_seq startseq, uint32_t sb_offset)
17801 struct rack_sendmap *my_rsm = NULL;
17803 if (tp->t_state < TCPS_ESTABLISHED) {
17805 * We don't start any measurements if we are
17806 * not at least established.
17810 if (tp->t_state >= TCPS_FIN_WAIT_1) {
17812 * We will get no more data into the SB
17813 * this means we need to have the data available
17814 * before we start a measurement.
17817 if (sbavail(&tptosocket(tp)->so_snd) <
17818 max(rc_init_window(rack),
17819 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) {
17820 /* Nope not enough data */
17824 tp->t_flags |= TF_GPUTINPROG;
17825 rack->r_ctl.rc_gp_cumack_ts = 0;
17826 rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
17827 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
17828 tp->gput_seq = startseq;
17829 rack->app_limited_needs_set = 0;
17830 if (rack->in_probe_rtt)
17831 rack->measure_saw_probe_rtt = 1;
17832 else if ((rack->measure_saw_probe_rtt) &&
17833 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
17834 rack->measure_saw_probe_rtt = 0;
17835 if (rack->rc_gp_filled)
17836 tp->gput_ts = rack->r_ctl.last_cumack_advance;
17838 /* Special case initial measurement */
17841 tp->gput_ts = tcp_get_usecs(&tv);
17842 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
17845 * We take a guess out into the future,
17846 * if we have no measurement and no
17847 * initial rate, we measure the first
17848 * initial-window's worth of data to
17849 * speed up getting some GP measurement and
17850 * thus start pacing.
17852 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) {
17853 rack->app_limited_needs_set = 1;
17854 tp->gput_ack = startseq + max(rc_init_window(rack),
17855 (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
17856 rack_log_pacing_delay_calc(rack,
17861 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts),
17863 __LINE__, NULL, 0);
17864 rack_tend_gp_marks(tp, rack);
17865 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL);
17870 * We are out somewhere in the sb
17871 * can we use the already outstanding data?
17874 if (rack->r_ctl.rc_app_limited_cnt == 0) {
17876 * Yes first one is good and in this case
17877 * the tp->gput_ts is correctly set based on
17878 * the last ack that arrived (no need to
17879 * set things up when an ack comes in).
17881 my_rsm = tqhash_min(rack->r_ctl.tqh);
17882 if ((my_rsm == NULL) ||
17883 (my_rsm->r_rtr_cnt != 1)) {
17884 /* retransmission? */
17888 if (rack->r_ctl.rc_first_appl == NULL) {
17890 * If rc_first_appl is NULL
17891 * then the cnt should be 0.
17892 * This is probably an error, maybe
17893 * a KASSERT would be appropriate.
17898 * If we have a marker pointer to the last one that is
17899 * app limited we can use that, but we need to set
17900 * things up so that when it gets ack'ed we record
17901 * the ack time (if its not already acked).
17903 rack->app_limited_needs_set = 1;
17905 * We want to get to the rsm that is either
17906 * next with space i.e. over 1 MSS or the one
17907 * after that (after the app-limited).
17909 my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl);
17911 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp))
17912 /* Have to use the next one */
17913 my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm);
17915 /* Use after the first MSS of it is acked */
17916 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp);
17920 if ((my_rsm == NULL) ||
17921 (my_rsm->r_rtr_cnt != 1)) {
17923 * Either its a retransmit or
17924 * the last is the app-limited one.
17929 tp->gput_seq = my_rsm->r_start;
17931 if (my_rsm->r_flags & RACK_ACKED) {
17933 * This one has been acked use the arrival ack time
17935 struct rack_sendmap *nrsm;
17937 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
17938 rack->app_limited_needs_set = 0;
17940 * Ok in this path we need to use the r_end now
17941 * since this guy is the starting ack.
17943 tp->gput_seq = my_rsm->r_end;
17945 * We also need to adjust up the sendtime
17946 * to the send of the next data after my_rsm.
17948 nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm);
17953 * The next has not been sent, that's the
17954 * case for using the latest.
17959 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0];
17960 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
17961 rack->r_ctl.rc_gp_cumack_ts = 0;
17962 rack_log_pacing_delay_calc(rack,
17967 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts),
17969 __LINE__, my_rsm, 0);
17970 /* Now lets make sure all are marked as they should be */
17971 rack_tend_gp_marks(tp, rack);
17972 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL);
17978 * We don't know how long we may have been
17979 * idle or if this is the first-send. Lets
17980 * setup the flag so we will trim off
17981 * the first ack'd data so we get a true
17984 rack->app_limited_needs_set = 1;
17985 tp->gput_ack = startseq + rack_get_measure_window(tp, rack);
17986 rack->r_ctl.rc_gp_cumack_ts = 0;
17987 /* Find this guy so we can pull the send time */
17988 my_rsm = tqhash_find(rack->r_ctl.tqh, startseq);
17990 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0];
17991 if (my_rsm->r_flags & RACK_ACKED) {
17993 * Unlikely since its probably what was
17994 * just transmitted (but I am paranoid).
17996 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
17997 rack->app_limited_needs_set = 0;
17999 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) {
18000 /* This also is unlikely */
18001 tp->gput_seq = my_rsm->r_start;
18005 * TSNH unless we have some send-map limit,
18006 * and even at that it should not be hitting
18007 * that limit (we should have stopped sending).
18012 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
18014 rack_tend_gp_marks(tp, rack);
18015 rack_log_pacing_delay_calc(rack,
18020 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts),
18021 9, __LINE__, NULL, 0);
18022 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL);
18025 static inline uint32_t
18026 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use,
18027 uint32_t avail, int32_t sb_offset)
18032 if (tp->snd_wnd > cwnd_to_use)
18033 sendwin = cwnd_to_use;
18035 sendwin = tp->snd_wnd;
18036 if (ctf_outstanding(tp) >= tp->snd_wnd) {
18037 /* We never want to go over our peers rcv-window */
18042 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
18043 if (flight >= sendwin) {
18045 * We have in flight what we are allowed by cwnd (if
18046 * it was rwnd blocking it would have hit above out
18051 len = sendwin - flight;
18052 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) {
18053 /* We would send too much (beyond the rwnd) */
18054 len = tp->snd_wnd - ctf_outstanding(tp);
18056 if ((len + sb_offset) > avail) {
18058 * We don't have that much in the SB, how much is
18061 len = avail - sb_offset;
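/*
 * Net effect of this function:
 * len = min(cwnd_to_use, snd_wnd) - flight, further clamped so that
 * outstanding + len never exceeds the peer's window and so that we
 * never ask for more than the socket buffer holds past sb_offset.
 */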
18068 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags,
18069 unsigned ipoptlen, int32_t orig_len, int32_t len, int error,
18070 int rsm_is_null, int optlen, int line, uint16_t mode)
18072 if (tcp_bblogging_on(rack->rc_tp)) {
18073 union tcp_log_stackspecific log;
18076 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
18077 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
18078 log.u_bbr.flex1 = error;
18079 log.u_bbr.flex2 = flags;
18080 log.u_bbr.flex3 = rsm_is_null;
18081 log.u_bbr.flex4 = ipoptlen;
18082 log.u_bbr.flex5 = tp->rcv_numsacks;
18083 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
18084 log.u_bbr.flex7 = optlen;
18085 log.u_bbr.flex8 = rack->r_fsb_inited;
18086 log.u_bbr.applimited = rack->r_fast_output;
18087 log.u_bbr.bw_inuse = rack_get_bw(rack);
18088 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
18089 log.u_bbr.cwnd_gain = mode;
18090 log.u_bbr.pkts_out = orig_len;
18091 log.u_bbr.lt_epoch = len;
18092 log.u_bbr.delivered = line;
18093 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
18094 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18095 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0,
18096 len, &log, false, NULL, __func__, __LINE__, &tv);
18101 static struct mbuf *
18102 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen,
18103 struct rack_fast_send_blk *fsb,
18104 int32_t seglimit, int32_t segsize, int hw_tls)
18107 struct ktls_session *tls, *ntls;
18109 struct mbuf *start;
18112 struct mbuf *m, *n, **np, *smb;
18115 int32_t len = *plen;
18117 int32_t len_cp = 0;
18118 uint32_t mlen, frags;
18120 soff = off = the_off;
18125 if (hw_tls && (m->m_flags & M_EXTPG))
18126 tls = m->m_epg_tls;
18140 if (m->m_flags & M_EXTPG)
18141 ntls = m->m_epg_tls;
18146 * Avoid mixing TLS records with handshake
18147 * data or TLS records from different
18157 mlen = min(len, m->m_len - off);
18160 * For M_EXTPG mbufs, add 3 segments
18161 * + 1 in case we are crossing page boundaries
18162 * + 2 in case the TLS hdr/trailer are used
18163 * It is cheaper to just add the segments
18164 * than it is to take the cache miss to look
18165 * at the mbuf ext_pgs state in detail.
18167 if (m->m_flags & M_EXTPG) {
18168 fragsize = min(segsize, PAGE_SIZE);
18171 fragsize = segsize;
18175 /* Break if we really can't fit anymore. */
18176 if ((frags + 1) >= seglimit) {
18182 * Reduce size if you can't copy the whole
18183 * mbuf. If we can't copy the whole mbuf, also
18184 * adjust len so the loop will end after this
18187 if ((frags + howmany(mlen, fragsize)) >= seglimit) {
18188 mlen = (seglimit - frags - 1) * fragsize;
18190 *plen = len_cp + len;
18192 frags += howmany(mlen, fragsize);
18196 KASSERT(seglimit > 0,
18197 ("%s: seglimit went too low", __func__));
18199 n = m_get(M_NOWAIT, m->m_type);
18205 len_cp += n->m_len;
18206 if (m->m_flags & (M_EXT|M_EXTPG)) {
18207 n->m_data = m->m_data + off;
18210 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
18217 if (len || (soff == smb->m_len)) {
18219 * We have more so we move forward or
18220 * we have consumed the entire mbuf and
18221 * len has fallen to 0.
18233 * Save off the size of the mbuf. We do
18234 * this so that we can recognize when it
18235 * has been trimmed by sbcut() as acks
18238 fsb->o_m_len = smb->m_len;
18239 fsb->o_t_len = M_TRAILINGROOM(smb);
18242 * This is the case where the next mbuf went to NULL. This
18243 * means with this copy we have sent everything in the sb.
18244 * In theory we could clear the fast_output flag, but let's
18245 * not since it's possible that we could get more added
18246 * and acks that call the extend function which would let
18262 * This is a copy of m_copym(), taking the TSO segment size/limit
18263 * constraints into account, and advancing the sndptr as it goes.
18265 static struct mbuf *
18266 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen,
18267 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff)
18269 struct mbuf *m, *n;
18272 m = rack->r_ctl.fsb.m;
18273 if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) {
18275 * The trailing space changed, mbufs can grow
18276 * at the tail but they can't shrink from
18277 * it, KASSERT that. Adjust the orig_m_len to
18278 * compensate for this change.
18280 KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)),
18281 ("mbuf:%p rack:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n",
18284 (intmax_t)M_TRAILINGROOM(m),
18285 rack->r_ctl.fsb.o_t_len,
18286 rack->r_ctl.fsb.o_m_len,
18288 rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m));
18289 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m);
18291 if (m->m_len < rack->r_ctl.fsb.o_m_len) {
18293 * Mbuf shrank, trimmed off the top by an ack, our
18296 KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)),
18297 ("mbuf:%p len:%u rack:%p oml:%u soff:%u\n",
18299 rack, rack->r_ctl.fsb.o_m_len,
18300 rack->r_ctl.fsb.off));
18302 if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len- m->m_len))
18303 rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len);
18305 rack->r_ctl.fsb.off = 0;
18306 rack->r_ctl.fsb.o_m_len = m->m_len;
18308 } else if (m->m_len > rack->r_ctl.fsb.o_m_len) {
18309 panic("rack:%p m:%p m_len grew outside of t_space compensation",
18313 soff = rack->r_ctl.fsb.off;
18314 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff));
18315 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen));
18316 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?",
18318 rack, *plen, m, m->m_len));
18319 /* Save off the right location before we copy and advance */
18321 *s_mb = rack->r_ctl.fsb.m;
18322 n = rack_fo_base_copym(m, soff, plen,
18324 seglimit, segsize, rack->r_ctl.fsb.hw_tls);
18328 /* Log the buffer level */
18330 rack_log_queue_level(struct tcpcb *tp, struct tcp_rack *rack,
18331 int len, struct timeval *tv,
18334 uint32_t p_rate = 0, p_queue = 0, err = 0;
18335 union tcp_log_stackspecific log;
18338 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue);
18339 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate);
18341 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
18342 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
18343 log.u_bbr.flex1 = p_rate;
18344 log.u_bbr.flex2 = p_queue;
18345 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using;
18346 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs;
18347 log.u_bbr.flex6 = rack->r_ctl.crte->time_between;
18348 log.u_bbr.flex7 = 99;
18349 log.u_bbr.flex8 = 0;
18350 log.u_bbr.pkts_out = err;
18351 log.u_bbr.delRate = rack->r_ctl.crte->rate;
18352 log.u_bbr.timeStamp = cts;
18353 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18354 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0,
18355 len, &log, false, NULL, __func__, __LINE__, tv);
18360 rack_check_queue_level(struct tcp_rack *rack, struct tcpcb *tp,
18361 struct timeval *tv, uint32_t cts, int len, uint32_t segsiz)
18363 uint64_t lentime = 0;
18365 uint32_t p_rate = 0, p_queue = 0, err;
18366 union tcp_log_stackspecific log;
18369 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue);
18370 /* Failed or queue is zero */
18371 if (err || (p_queue == 0)) {
18375 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate);
18381 * If we reach here we have some bytes in
18382 * the queue. The number returned is a value
18383 * between 0 and 0xffff where ffff is full
18384 * and 0 is empty. So how best to make this into
18385 * something usable?
18387 * The "safer" way is to take the b/w gotten
18388 * from the query (which should be our b/w rate)
18389 * and pretend that a full send (our rc_pace_max_segs)
18390 * is outstanding. We factor it so it's as if a full
18391 * number of our MSS segments, in terms of full
18392 * ethernet segments, are outstanding.
18396 lentime = (rack->r_ctl.rc_pace_max_segs / segsiz);
18397 lentime *= ETHERNET_SEGMENT_SIZE;
18398 lentime *= (uint64_t)HPTS_USEC_IN_SEC;
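/*
 * Illustrative reading of the computation above (the final divide by
 * the queried rate, p_rate, is assumed from the comment): with a full
 * pace burst treated as outstanding,
 *   lentime_usec ~= (rc_pace_max_segs / segsiz) * ETHERNET_SEGMENT_SIZE
 *                   * HPTS_USEC_IN_SEC / p_rate
 * i.e. roughly how long the hardware queue would take to drain that
 * burst at the reported rate.
 */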
18401 /* TSNH -- KASSERT? */
18405 if (tcp_bblogging_on(tp)) {
18406 memset(&log, 0, sizeof(log));
18407 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
18408 log.u_bbr.flex1 = p_rate;
18409 log.u_bbr.flex2 = p_queue;
18410 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using;
18411 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs;
18412 log.u_bbr.flex6 = rack->r_ctl.crte->time_between;
18413 log.u_bbr.flex7 = 99;
18414 log.u_bbr.flex8 = 0;
18415 log.u_bbr.pkts_out = err;
18416 log.u_bbr.delRate = rack->r_ctl.crte->rate;
18417 log.u_bbr.cur_del_rate = lentime;
18418 log.u_bbr.timeStamp = cts;
18419 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18420 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0,
18421 len, &log, false, NULL, __func__, __LINE__,tv);
18424 return ((uint32_t)lentime);
18428 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
18429 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp)
18432 * Enter the fast retransmit path. We are given that a sched_pin is
18433 * in place (if accounting is compiled in) and the cycle count taken
18434 * at the entry is in ts_val. The concept here is that the rsm
18435 * now holds the mbuf offsets and such, so we can directly transmit
18436 * without a lot of overhead; the len field is already set for
18437 * us to prohibit us from sending too much (usually it's 1 MSS).
18439 struct ip *ip = NULL;
18440 struct udphdr *udp = NULL;
18441 struct tcphdr *th = NULL;
18442 struct mbuf *m = NULL;
18445 struct tcp_log_buffer *lgb;
18446 #ifdef TCP_ACCOUNTING
18451 u_char opt[TCP_MAXOLEN];
18452 uint32_t hdrlen, optlen;
18453 int32_t slot, segsiz, max_val, tso = 0, error = 0, ulen = 0;
18455 uint32_t if_hw_tsomaxsegcount = 0, startseq;
18456 uint32_t if_hw_tsomaxsegsize;
18457 int32_t ip_sendflag = IP_NO_SND_TAG_RL;
18460 struct ip6_hdr *ip6 = NULL;
18462 if (rack->r_is_v6) {
18463 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
18464 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
18468 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
18469 hdrlen = sizeof(struct tcpiphdr);
18471 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
18475 /* It's a TLP, add the flag; it may already be there but be sure */
18476 rsm->r_flags |= RACK_TLP;
18478 /* If it was a TLP it is not one on this retransmit */
18479 rsm->r_flags &= ~RACK_TLP;
18481 startseq = rsm->r_start;
18482 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
18483 inp = rack->rc_inp;
18485 flags = tcp_outflags[tp->t_state];
18486 if (flags & (TH_SYN|TH_RST)) {
18489 if (rsm->r_flags & RACK_HAS_FIN) {
18490 /* We can't send a FIN here */
18493 if (flags & TH_FIN) {
18494 /* We never send a FIN */
18497 if (tp->t_flags & TF_RCVD_TSTMP) {
18498 to.to_tsval = ms_cts + tp->ts_offset;
18499 to.to_tsecr = tp->ts_recent;
18500 to.to_flags = TOF_TS;
18502 optlen = tcp_addoptions(&to, opt);
18504 udp = rack->r_ctl.fsb.udp;
18506 hdrlen += sizeof(struct udphdr);
18507 if (rack->r_ctl.rc_pace_max_segs)
18508 max_val = rack->r_ctl.rc_pace_max_segs;
18509 else if (rack->rc_user_set_max_segs)
18510 max_val = rack->rc_user_set_max_segs * segsiz;
18513 if ((tp->t_flags & TF_TSO) &&
18519 if (MHLEN < hdrlen + max_linkhdr)
18520 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
18523 m = m_gethdr(M_NOWAIT, MT_DATA);
18526 m->m_data += max_linkhdr;
18528 th = rack->r_ctl.fsb.th;
18529 /* Establish the len to send */
18532 if ((tso) && (len + optlen > segsiz)) {
18533 uint32_t if_hw_tsomax;
18536 /* extract TSO information */
18537 if_hw_tsomax = tp->t_tsomax;
18538 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
18539 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
18541 * Check if we should limit by maximum payload
18544 if (if_hw_tsomax != 0) {
18545 /* compute maximum TSO length */
18546 max_len = (if_hw_tsomax - hdrlen -
18548 if (max_len <= 0) {
18550 } else if (len > max_len) {
18554 if (len <= segsiz) {
18556 * In case there are too many small fragments don't
18564 if ((tso == 0) && (len > segsiz))
18566 (void)tcp_get_usecs(tv);
18568 (len <= MHLEN - hdrlen - max_linkhdr)) {
18571 th->th_seq = htonl(rsm->r_start);
18572 th->th_ack = htonl(tp->rcv_nxt);
18574 * The PUSH bit should only be applied
18575 * if the full retransmission is made. If
18576 * we are sending less than this, it is the
18577 * left hand edge and should not have
18580 if ((rsm->r_flags & RACK_HAD_PUSH) &&
18581 (len == (rsm->r_end - rsm->r_start)))
18583 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale));
18584 if (th->th_win == 0) {
18585 tp->t_sndzerowin++;
18586 tp->t_flags |= TF_RXWIN0SENT;
18588 tp->t_flags &= ~TF_RXWIN0SENT;
18589 if (rsm->r_flags & RACK_TLP) {
18591 * TLP should not count in retran count, but
18594 counter_u64_add(rack_tlp_retran, 1);
18595 counter_u64_add(rack_tlp_retran_bytes, len);
18597 tp->t_sndrexmitpack++;
18598 KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
18599 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
18602 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
18605 if (rsm->m == NULL)
18608 ((rsm->orig_m_len != rsm->m->m_len) ||
18609 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) {
18610 /* Fix up the orig_m_len and possibly the mbuf offset */
18611 rack_adjust_orig_mlen(rsm);
18613 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls);
18614 if (len <= segsiz) {
18616 * Must have run out of mbufs for the copy;
18617 * shorten it to no longer need tso. Let's
18618 * not put on sendalot since we are low on
18623 if ((m->m_next == NULL) || (len <= 0)){
18628 ulen = hdrlen + len - sizeof(struct ip6_hdr);
18630 ulen = hdrlen + len - sizeof(struct ip);
18631 udp->uh_ulen = htons(ulen);
18633 m->m_pkthdr.rcvif = (struct ifnet *)0;
18634 if (TCPS_HAVERCVDSYN(tp->t_state) &&
18635 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
18636 int ect = tcp_ecn_output_established(tp, &flags, len, true);
18637 if ((tp->t_state == TCPS_SYN_RECEIVED) &&
18638 (tp->t_flags2 & TF2_ECN_SND_ECE))
18639 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
18641 if (rack->r_is_v6) {
18642 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
18643 ip6->ip6_flow |= htonl(ect << 20);
18648 ip->ip_tos &= ~IPTOS_ECN_MASK;
18652 if (rack->r_ctl.crte != NULL) {
18653 /* See if we can send via the hw queue */
18654 slot = rack_check_queue_level(rack, tp, tv, cts, len, segsiz);
18655 /* If there is nothing in queue (no pacing time) we can send via the hw queue */
18659 tcp_set_flags(th, flags);
18660 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
18662 if (rack->r_is_v6) {
18664 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
18665 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
18666 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
18667 th->th_sum = htons(0);
18668 UDPSTAT_INC(udps_opackets);
18670 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
18671 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
18672 th->th_sum = in6_cksum_pseudo(ip6,
18673 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
18678 #if defined(INET6) && defined(INET)
18684 m->m_pkthdr.csum_flags = CSUM_UDP;
18685 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
18686 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
18687 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
18688 th->th_sum = htons(0);
18689 UDPSTAT_INC(udps_opackets);
18691 m->m_pkthdr.csum_flags = CSUM_TCP;
18692 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
18693 th->th_sum = in_pseudo(ip->ip_src.s_addr,
18694 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
18695 IPPROTO_TCP + len + optlen));
18697 /* IP version must be set here for ipv4/ipv6 checking later */
18698 KASSERT(ip->ip_v == IPVERSION,
18699 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
18704 * Here we use segsiz since we have no added options besides
18705 * any standard timestamp options (no DSACKs or SACKS are sent
18706 * via either fast-path).
18708 KASSERT(len > segsiz,
18709 ("%s: len <= tso_segsz tp:%p", __func__, tp));
18710 m->m_pkthdr.csum_flags |= CSUM_TSO;
18711 m->m_pkthdr.tso_segsz = segsiz;
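/*
 * With CSUM_TSO set and tso_segsz filled in, the NIC is expected to
 * split this single large frame into segsiz-sized TCP segments,
 * replicating the header we build below for each one.
 */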
18714 if (rack->r_is_v6) {
18715 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit;
18716 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
18717 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
18718 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
18720 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
18723 #if defined(INET) && defined(INET6)
18728 ip->ip_len = htons(m->m_pkthdr.len);
18729 ip->ip_ttl = rack->r_ctl.fsb.hoplimit;
18730 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
18731 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
18732 if (tp->t_port == 0 || len < V_tcp_minmss) {
18733 ip->ip_off |= htons(IP_DF);
18736 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
18740 if (doing_tlp == 0) {
18741 /* Set we retransmitted */
18742 rack->rc_gp_saw_rec = 1;
18744 /* Its a TLP set ca or ss */
18745 if (tp->snd_cwnd > tp->snd_ssthresh) {
18746 /* Set we sent in CA */
18747 rack->rc_gp_saw_ca = 1;
18749 /* Set we sent in SS */
18750 rack->rc_gp_saw_ss = 1;
18753 /* Time to copy in our header */
18754 cpto = mtod(m, uint8_t *);
18755 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
18756 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
18758 bcopy(opt, th + 1, optlen);
18759 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
18761 th->th_off = sizeof(struct tcphdr) >> 2;
18763 if (tcp_bblogging_on(rack->rc_tp)) {
18764 union tcp_log_stackspecific log;
18766 if (rsm->r_flags & RACK_RWND_COLLAPSED) {
18767 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm);
18768 counter_u64_add(rack_collapsed_win_rxt, 1);
18769 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start));
18771 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
18772 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
18773 if (rack->rack_no_prr)
18774 log.u_bbr.flex1 = 0;
18776 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
18777 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
18778 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
18779 log.u_bbr.flex4 = max_val;
18780 /* Save off the early/late values */
18781 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
18782 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
18783 log.u_bbr.bw_inuse = rack_get_bw(rack);
18784 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw;
18785 if (doing_tlp == 0)
18786 log.u_bbr.flex8 = 1;
18788 log.u_bbr.flex8 = 2;
18789 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
18790 log.u_bbr.flex7 = 55;
18791 log.u_bbr.pkts_out = tp->t_maxseg;
18792 log.u_bbr.timeStamp = cts;
18793 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18794 if (rsm && (rsm->r_rtr_cnt > 0)) {
18796 * When we have a retransmit we want to log the
18797 * burst at send and flight at send from before.
18799 log.u_bbr.flex5 = rsm->r_fas;
18800 log.u_bbr.bbr_substate = rsm->r_bas;
18803 * This is currently unlikely until we do the
18804 * packet pair probes but I will add it for completeness.
18806 log.u_bbr.flex5 = log.u_bbr.inflight;
18807 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz);
18809 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use;
18810 log.u_bbr.delivered = 0;
18811 log.u_bbr.rttProp = (uint64_t)rsm;
18812 log.u_bbr.delRate = rsm->r_flags;
18813 log.u_bbr.delRate <<= 31;
18814 log.u_bbr.delRate |= rack->r_must_retran;
18815 log.u_bbr.delRate <<= 1;
18816 log.u_bbr.delRate |= 1;
18817 log.u_bbr.pkt_epoch = __LINE__;
18818 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
18819 len, &log, false, NULL, __func__, __LINE__, tv);
18822 if ((rack->r_ctl.crte != NULL) &&
18823 tcp_bblogging_on(tp)) {
18824 rack_log_queue_level(tp, rack, len, tv, cts);
18827 if (rack->r_is_v6) {
18828 error = ip6_output(m, NULL,
18830 ip_sendflag, NULL, NULL, inp);
18836 error = ip_output(m, NULL,
18838 ip_sendflag, 0, inp);
18843 lgb->tlb_errno = error;
18848 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) {
18849 rack->rc_hw_nobuf = 0;
18850 rack->r_ctl.rc_agg_delayed = 0;
18853 rack->r_ctl.rc_agg_early = 0;
18856 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv),
18857 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz);
18859 rack->rc_tlp_in_progress = 1;
18860 rack->r_ctl.rc_tlp_cnt_out++;
18863 counter_u64_add(rack_total_bytes, len);
18864 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls);
18866 rack->rc_last_sent_tlp_past_cumack = 0;
18867 rack->rc_last_sent_tlp_seq_valid = 1;
18868 rack->r_ctl.last_sent_tlp_seq = rsm->r_start;
18869 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start;
18872 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
18873 rack->forced_ack = 0; /* If we send something zap the FA flag */
18874 if (IN_FASTRECOVERY(tp->t_flags) && rsm)
18875 rack->r_ctl.retran_during_recovery += len;
18879 idx = (len / segsiz) + 3;
18880 if (idx >= TCP_MSS_ACCT_ATIMER)
18881 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
18883 counter_u64_add(rack_out_size[idx], 1);
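/*
 * rack_out_size[] is a histogram of send sizes in segments:
 * idx = (len / segsiz) + 3, clamped above to the last bucket
 * (TCP_MSS_ACCT_ATIMER - 1).
 */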
18885 if (tp->t_rtttime == 0) {
18886 tp->t_rtttime = ticks;
18887 tp->t_rtseq = startseq;
18888 KMOD_TCPSTAT_INC(tcps_segstimed);
18890 counter_u64_add(rack_fto_rsm_send, 1);
18891 if (error && (error == ENOBUFS)) {
18892 if (rack->r_ctl.crte != NULL) {
18893 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF);
18894 if (tcp_bblogging_on(rack->rc_tp))
18895 rack_log_queue_level(tp, rack, len, tv, cts);
18897 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF);
18898 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
18899 if (rack->rc_enobuf < 0x7f)
18901 if (slot < (10 * HPTS_USEC_IN_MSEC))
18902 slot = 10 * HPTS_USEC_IN_MSEC;
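/*
 * ENOBUFS backoff: each consecutive ENOBUFS grows the pacing slot by
 * roughly one more millisecond ((1 + rc_enobuf) * HPTS_USEC_IN_MSEC),
 * with a 10ms floor, giving the driver/hardware queue a chance to
 * drain before we try again.
 */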
18903 if (rack->r_ctl.crte != NULL) {
18904 counter_u64_add(rack_saw_enobuf_hw, 1);
18905 tcp_rl_log_enobuf(rack->r_ctl.crte);
18907 counter_u64_add(rack_saw_enobuf, 1);
18909 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz);
18911 (rack->rc_always_pace == 0) ||
18912 (rack->r_rr_config == 1)) {
18914 * We have no pacing set or we
18915 * are using old-style rack or
18916 * we are overridden to use the old 1ms pacing.
18918 slot = rack->r_ctl.rc_min_to;
18920 rack_start_hpts_timer(rack, tp, cts, slot, len, 0);
18921 #ifdef TCP_ACCOUNTING
18922 crtsc = get_cyclecount();
18923 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18924 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
18926 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18927 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
18929 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18930 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz);
18942 rack_sndbuf_autoscale(struct tcp_rack *rack)
18945 * Automatic sizing of send socket buffer. Often the send buffer
18946 * size is not optimally adjusted to the actual network conditions
18947 * at hand (delay bandwidth product). Setting the buffer size too
18948 * small limits throughput on links with high bandwidth and high
18949 * delay (e.g. trans-continental/oceanic links). Setting the
18950 * buffer size too big consumes too much real kernel memory,
18951 * especially with many connections on busy servers.
18953 * The criteria to step up the send buffer one notch are:
18954 * 1. receive window of remote host is larger than send buffer
18955 * (with a fudge factor of 5/4th);
18956 * 2. send buffer is filled to 7/8th with data (so we actually
18957 * have data to make use of it);
18958 * 3. send buffer fill has not hit maximal automatic size;
18959 * 4. our send window (slow start and congestion controlled) is
18960 * larger than sent but unacknowledged data in send buffer.
18962 * Note that the rack version moves things much faster, since
18963 * we want to avoid hitting cache lines in the rack_fast_output()
18964 * path; this is called much less often and thus moves
18965 * the SB forward by a percentage.
18969 uint32_t sendwin, scaleup;
18972 so = rack->rc_inp->inp_socket;
18973 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd);
18974 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
18975 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
18976 sbused(&so->so_snd) >=
18977 (so->so_snd.sb_hiwat / 8 * 7) &&
18978 sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
18979 sendwin >= (sbused(&so->so_snd) -
18980 (tp->snd_nxt - tp->snd_una))) {
18981 if (rack_autosndbuf_inc)
18982 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100;
18984 scaleup = V_tcp_autosndbuf_inc;
18985 if (scaleup < V_tcp_autosndbuf_inc)
18986 scaleup = V_tcp_autosndbuf_inc;
18987 scaleup += so->so_snd.sb_hiwat;
18988 if (scaleup > V_tcp_autosndbuf_max)
18989 scaleup = V_tcp_autosndbuf_max;
18990 if (!sbreserve_locked(so, SO_SND, scaleup, curthread))
18991 so->so_snd.sb_flags &= ~SB_AUTOSIZE;
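/*
 * Rough worked example of the step above (illustrative values): with
 * rack_autosndbuf_inc = 20 and sb_hiwat = 64KB, scaleup becomes
 * 64KB + ~12.8KB, clamped to V_tcp_autosndbuf_max. If the reservation
 * fails, automatic sizing is turned off for this socket.
 */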
18997 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val,
18998 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err)
19001 * Enter to do fast output. We are given that the sched_pin is
19002 * in place (if accounting is compiled in) and the cycle count taken
19003 * at entry is in place in ts_val. The idea here is that
19004 * we know how many more bytes need to be sent (presumably either
19005 * during pacing or to fill the cwnd and that was greater than
19006 * the max-burst). We have how much to send and all the info we
19007 * need to just send.
19010 struct ip *ip = NULL;
19012 struct udphdr *udp = NULL;
19013 struct tcphdr *th = NULL;
19014 struct mbuf *m, *s_mb;
19017 struct tcp_log_buffer *lgb;
19018 #ifdef TCP_ACCOUNTING
19022 u_char opt[TCP_MAXOLEN];
19023 uint32_t hdrlen, optlen;
19024 #ifdef TCP_ACCOUNTING
19027 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0;
19030 uint32_t if_hw_tsomaxsegcount = 0, startseq;
19031 uint32_t if_hw_tsomaxsegsize;
19032 uint16_t add_flag = RACK_SENT_FP;
19034 struct ip6_hdr *ip6 = NULL;
19036 if (rack->r_is_v6) {
19037 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
19038 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
19043 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
19044 hdrlen = sizeof(struct tcpiphdr);
19047 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
19051 startseq = tp->snd_max;
19052 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
19053 inp = rack->rc_inp;
19054 len = rack->r_ctl.fsb.left_to_send;
19056 flags = rack->r_ctl.fsb.tcp_flags;
19057 if (tp->t_flags & TF_RCVD_TSTMP) {
19058 to.to_tsval = ms_cts + tp->ts_offset;
19059 to.to_tsecr = tp->ts_recent;
19060 to.to_flags = TOF_TS;
19062 optlen = tcp_addoptions(&to, opt);
19064 udp = rack->r_ctl.fsb.udp;
19066 hdrlen += sizeof(struct udphdr);
19067 if (rack->r_ctl.rc_pace_max_segs)
19068 max_val = rack->r_ctl.rc_pace_max_segs;
19069 else if (rack->rc_user_set_max_segs)
19070 max_val = rack->rc_user_set_max_segs * segsiz;
19073 if ((tp->t_flags & TF_TSO) &&
19080 if (MHLEN < hdrlen + max_linkhdr)
19081 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
19084 m = m_gethdr(M_NOWAIT, MT_DATA);
19087 m->m_data += max_linkhdr;
19089 th = rack->r_ctl.fsb.th;
19090 /* Establish the len to send */
19093 if ((tso) && (len + optlen > segsiz)) {
19094 uint32_t if_hw_tsomax;
19097 /* extract TSO information */
19098 if_hw_tsomax = tp->t_tsomax;
19099 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
19100 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
19102 * Check if we should limit by maximum payload
19105 if (if_hw_tsomax != 0) {
19106 /* compute maximum TSO length */
19107 max_len = (if_hw_tsomax - hdrlen -
19109 if (max_len <= 0) {
19111 } else if (len > max_len) {
19115 if (len <= segsiz) {
19117 * In case there are too many small fragments don't
19125 if ((tso == 0) && (len > segsiz))
19127 (void)tcp_get_usecs(tv);
19129 (len <= MHLEN - hdrlen - max_linkhdr)) {
19132 sb_offset = tp->snd_max - tp->snd_una;
19133 th->th_seq = htonl(tp->snd_max);
19134 th->th_ack = htonl(tp->rcv_nxt);
19135 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale));
19136 if (th->th_win == 0) {
19137 tp->t_sndzerowin++;
19138 tp->t_flags |= TF_RXWIN0SENT;
19140 tp->t_flags &= ~TF_RXWIN0SENT;
19141 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */
19142 KMOD_TCPSTAT_INC(tcps_sndpack);
19143 KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
19145 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
19148 if (rack->r_ctl.fsb.m == NULL)
19151 /* s_mb and s_soff are saved for rack_log_output */
19152 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize,
19154 if (len <= segsiz) {
19156 * Must have run out of mbufs for the copy;
19157 * shorten it to no longer need tso. Let's
19158 * not put on sendalot since we are low on
19163 if (rack->r_ctl.fsb.rfo_apply_push &&
19164 (len == rack->r_ctl.fsb.left_to_send)) {
19165 tcp_set_flags(th, flags | TH_PUSH);
19166 add_flag |= RACK_HAD_PUSH;
19168 if ((m->m_next == NULL) || (len <= 0)){
19173 ulen = hdrlen + len - sizeof(struct ip6_hdr);
19175 ulen = hdrlen + len - sizeof(struct ip);
19176 udp->uh_ulen = htons(ulen);
19178 m->m_pkthdr.rcvif = (struct ifnet *)0;
19179 if (TCPS_HAVERCVDSYN(tp->t_state) &&
19180 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
19181 int ect = tcp_ecn_output_established(tp, &flags, len, false);
19182 if ((tp->t_state == TCPS_SYN_RECEIVED) &&
19183 (tp->t_flags2 & TF2_ECN_SND_ECE))
19184 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
19186 if (rack->r_is_v6) {
19187 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
19188 ip6->ip6_flow |= htonl(ect << 20);
19194 ip->ip_tos &= ~IPTOS_ECN_MASK;
19199 tcp_set_flags(th, flags);
19200 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
19202 if (rack->r_is_v6) {
19204 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
19205 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
19206 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
19207 th->th_sum = htons(0);
19208 UDPSTAT_INC(udps_opackets);
19210 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
19211 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
19212 th->th_sum = in6_cksum_pseudo(ip6,
19213 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
19218 #if defined(INET6) && defined(INET)
19224 m->m_pkthdr.csum_flags = CSUM_UDP;
19225 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
19226 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
19227 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
19228 th->th_sum = htons(0);
19229 UDPSTAT_INC(udps_opackets);
19231 m->m_pkthdr.csum_flags = CSUM_TCP;
19232 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
19233 th->th_sum = in_pseudo(ip->ip_src.s_addr,
19234 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
19235 IPPROTO_TCP + len + optlen));
19237 /* IP version must be set here for ipv4/ipv6 checking later */
19238 KASSERT(ip->ip_v == IPVERSION,
19239 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
19244 * Here we use segsiz since we have no added options besides
19245 * any standard timestamp options (no DSACKs or SACKS are sent
19246 * via either fast-path).
19248 KASSERT(len > segsiz,
19249 ("%s: len <= tso_segsz tp:%p", __func__, tp));
19250 m->m_pkthdr.csum_flags |= CSUM_TSO;
19251 m->m_pkthdr.tso_segsz = segsiz;
19254 if (rack->r_is_v6) {
19255 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit;
19256 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
19257 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
19258 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
19260 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
19263 #if defined(INET) && defined(INET6)
19268 ip->ip_len = htons(m->m_pkthdr.len);
19269 ip->ip_ttl = rack->r_ctl.fsb.hoplimit;
19270 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
19271 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
19272 if (tp->t_port == 0 || len < V_tcp_minmss) {
19273 ip->ip_off |= htons(IP_DF);
19276 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
19280 if (tp->snd_cwnd > tp->snd_ssthresh) {
19281 /* Set we sent in CA */
19282 rack->rc_gp_saw_ca = 1;
19284 /* Set we sent in SS */
19285 rack->rc_gp_saw_ss = 1;
19287 /* Time to copy in our header */
19288 cpto = mtod(m, uint8_t *);
19289 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
19290 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
19292 bcopy(opt, th + 1, optlen);
19293 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
19295 th->th_off = sizeof(struct tcphdr) >> 2;
19297 if ((rack->r_ctl.crte != NULL) &&
19298 tcp_bblogging_on(tp)) {
19299 rack_log_queue_level(tp, rack, len, tv, cts);
19301 if (tcp_bblogging_on(rack->rc_tp)) {
19302 union tcp_log_stackspecific log;
19304 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
19305 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
19306 if (rack->rack_no_prr)
19307 log.u_bbr.flex1 = 0;
19309 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
19310 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
19311 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
19312 log.u_bbr.flex4 = max_val;
19313 /* Save off the early/late values */
19314 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
19315 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
19316 log.u_bbr.bw_inuse = rack_get_bw(rack);
19317 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw;
19318 log.u_bbr.flex8 = 0;
19319 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
19320 log.u_bbr.flex7 = 44;
19321 log.u_bbr.pkts_out = tp->t_maxseg;
19322 log.u_bbr.timeStamp = cts;
19323 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
19324 log.u_bbr.flex5 = log.u_bbr.inflight;
19325 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use;
19326 log.u_bbr.delivered = 0;
19327 log.u_bbr.rttProp = 0;
19328 log.u_bbr.delRate = rack->r_must_retran;
19329 log.u_bbr.delRate <<= 1;
19330 log.u_bbr.pkt_epoch = __LINE__;
19331 /* For fast output there is no retrans, so just log inflight and how many mss we send */
19332 log.u_bbr.flex5 = log.u_bbr.inflight;
19333 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz);
19334 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
19335 len, &log, false, NULL, __func__, __LINE__, tv);
19339 if (rack->r_is_v6) {
19340 error = ip6_output(m, NULL,
19342 0, NULL, NULL, inp);
19345 #if defined(INET) && defined(INET6)
19350 error = ip_output(m, NULL,
19356 lgb->tlb_errno = error;
19363 } else if (rack->rc_hw_nobuf) {
19364 rack->rc_hw_nobuf = 0;
19365 rack->r_ctl.rc_agg_delayed = 0;
19368 rack->r_ctl.rc_agg_early = 0;
19370 if ((error == 0) && (rack->lt_bw_up == 0)) {
19372 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv);
19373 rack->r_ctl.lt_seq = tp->snd_una;
19374 rack->lt_bw_up = 1;
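/*
 * Presumably this starts the long-term (lt) bandwidth sample: record
 * the time and the starting sequence so a rate can later be derived
 * from bytes delivered past lt_seq over the time since lt_timemark.
 */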
19376 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv),
19377 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz);
19379 if (tp->snd_una == tp->snd_max) {
19380 rack->r_ctl.rc_tlp_rxt_last_time = cts;
19381 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
19382 tp->t_acktime = ticks;
19384 counter_u64_add(rack_total_bytes, len);
19385 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls);
19387 rack->forced_ack = 0; /* If we send something zap the FA flag */
19389 if ((tp->t_flags & TF_GPUTINPROG) == 0)
19390 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset);
19391 tp->snd_max += len;
19392 tp->snd_nxt = tp->snd_max;
19393 if (rack->rc_new_rnd_needed) {
19395 * Update the rnd to start ticking; note
19396 * that from a time perspective all of
19397 * the preceding idle time is "in the round"
19399 rack->rc_new_rnd_needed = 0;
19400 rack->r_ctl.roundends = tp->snd_max;
19405 idx = (len / segsiz) + 3;
19406 if (idx >= TCP_MSS_ACCT_ATIMER)
19407 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
19409 counter_u64_add(rack_out_size[idx], 1);
19411 if (len <= rack->r_ctl.fsb.left_to_send)
19412 rack->r_ctl.fsb.left_to_send -= len;
19414 rack->r_ctl.fsb.left_to_send = 0;
19415 if (rack->r_ctl.fsb.left_to_send < segsiz) {
19416 rack->r_fast_output = 0;
19417 rack->r_ctl.fsb.left_to_send = 0;
19418 /* At the end of fast_output scale up the sb */
19419 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd);
19420 rack_sndbuf_autoscale(rack);
19421 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd);
19423 if (tp->t_rtttime == 0) {
19424 tp->t_rtttime = ticks;
19425 tp->t_rtseq = startseq;
19426 KMOD_TCPSTAT_INC(tcps_segstimed);
19428 if ((rack->r_ctl.fsb.left_to_send >= segsiz) &&
19433 th = rack->r_ctl.fsb.th;
19434 #ifdef TCP_ACCOUNTING
19439 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
19440 counter_u64_add(rack_fto_send, 1);
19441 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz);
19442 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0);
19443 #ifdef TCP_ACCOUNTING
19444 crtsc = get_cyclecount();
19445 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19446 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
19448 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19449 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
19451 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19452 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz);
19460 rack->r_fast_output = 0;
19465 rack_setup_fast_output(struct tcpcb *tp, struct tcp_rack *rack,
19466 struct sockbuf *sb,
19467 int len, int orig_len, int segsiz, uint32_t pace_max_seg,
19471 rack->r_fast_output = 1;
19472 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
19473 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
19474 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m);
19475 rack->r_ctl.fsb.tcp_flags = flags;
19476 rack->r_ctl.fsb.left_to_send = orig_len - len;
19477 if (rack->r_ctl.fsb.left_to_send < pace_max_seg) {
19478 /* Less than a full sized pace, let's not */
19479 rack->r_fast_output = 0;
19482 /* Round down to the nearest pace_max_seg */
19483 rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg);
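/*
 * left_to_send is what remains beyond this call's send, rounded down
 * to whole pace_max_seg bursts, presumably so that each later
 * rack_fast_output() pass can emit a full pacing burst from the fsb
 * state alone.
 */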
19486 rack->r_ctl.fsb.hw_tls = 1;
19488 rack->r_ctl.fsb.hw_tls = 0;
19489 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
19490 ("rack:%p left_to_send:%u sbavail:%u out:%u",
19491 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
19492 (tp->snd_max - tp->snd_una)));
19493 if (rack->r_ctl.fsb.left_to_send < segsiz)
19494 rack->r_fast_output = 0;
19496 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
19497 rack->r_ctl.fsb.rfo_apply_push = 1;
19499 rack->r_ctl.fsb.rfo_apply_push = 0;
19504 rack_get_hpts_pacing_min_for_bw(struct tcp_rack *rack, int32_t segsiz)
19509 min_time = (uint64_t)get_hpts_min_sleep_time();
19510 maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC);
19511 maxlen = roundup(maxlen, segsiz);
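/*
 * Minimum burst for the pacing rate: the bytes gp_bw would move during
 * the smallest sleep the hpts can honor, rounded up to a whole segment.
 * Illustrative numbers: at ~1 Gbps (125e6 B/s) and a 250us minimum
 * sleep this is about 31250 bytes before rounding. Callers use it to
 * avoid pacing slots shorter than the hpts granularity.
 */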
19515 static struct rack_sendmap *
19516 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts)
19518 struct rack_sendmap *rsm = NULL;
19522 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point);
19523 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) {
19524 /* Nothing there, strange; turn off validity */
19525 rack->r_collapse_point_valid = 0;
19528 /* Can we send it yet? */
19529 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) {
19531 * Receiver window has not grown enough for
19532 * the segment to be put on the wire.
19536 if (rsm->r_flags & RACK_ACKED) {
19538 * It has been sacked, let's move to the
19539 * next one if possible.
19541 rack->r_ctl.last_collapse_point = rsm->r_end;
19543 if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
19544 rack->r_ctl.high_collapse_point)) {
19545 rack->r_collapse_point_valid = 0;
19550 /* Now has it been long enough ? */
19551 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts);
19552 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) {
19553 rack_log_collapse(rack, rsm->r_start,
19554 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])),
19555 thresh, __LINE__, 6, rsm->r_flags, rsm);
19558 /* Not enough time */
19559 rack_log_collapse(rack, rsm->r_start,
19560 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])),
19561 thresh, __LINE__, 7, rsm->r_flags, rsm);
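/*
 * In short: a collapsed-window segment is only eligible for
 * retransmission once a full RACK threshold has elapsed since its last
 * transmission; otherwise we log (case 7 above) and keep waiting.
 */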
19566 rack_validate_sizes(struct tcp_rack *rack, int32_t *len, int32_t segsiz, uint32_t pace_max_seg)
19568 if ((rack->full_size_rxt == 0) &&
19569 (rack->shape_rxt_to_pacing_min == 0) &&
19570 (*len >= segsiz)) {
19572 } else if (rack->shape_rxt_to_pacing_min &&
19574 /* We use pacing min as shaping len req */
19577 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz);
19582 * The else case is full_size_rxt is on, so send it all.
19583 * Note we do need to check this for exceeding
19584 * our max segment size, due to the fact that
19585 * we do sometimes merge chunks together, i.e.
19586 * we cannot just assume that we will never have
19587 * a chunk greater than pace_max_seg.
19589 if (*len > pace_max_seg)
19590 *len = pace_max_seg;
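/*
 * Of the branches above: shape_rxt_to_pacing_min clamps the chunk to
 * the bandwidth-derived minimum from rack_get_hpts_pacing_min_for_bw(),
 * while full_size_rxt sends the whole chunk but never more than
 * pace_max_seg.
 */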
19595 rack_output(struct tcpcb *tp)
19599 uint32_t sb_offset, s_moff = 0;
19600 int32_t len, error = 0;
19602 struct mbuf *m, *s_mb = NULL;
19604 uint32_t if_hw_tsomaxsegcount = 0;
19605 uint32_t if_hw_tsomaxsegsize;
19606 int32_t segsiz, minseg;
19607 long tot_len_this_send = 0;
19609 struct ip *ip = NULL;
19611 struct udphdr *udp = NULL;
19612 struct tcp_rack *rack;
19616 uint8_t check_done = 0;
19617 uint8_t wanted_cookie = 0;
19618 u_char opt[TCP_MAXOLEN];
19619 unsigned ipoptlen, optlen, hdrlen, ulen=0;
19622 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
19623 unsigned ipsec_optlen = 0;
19626 int32_t idle, sendalot;
19627 int32_t sub_from_prr = 0;
19628 volatile int32_t sack_rxmit;
19629 struct rack_sendmap *rsm = NULL;
19633 int32_t sup_rack = 0;
19634 uint32_t cts, ms_cts, delayed, early;
19635 uint16_t add_flag = RACK_SENT_SP;
19636 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */
19637 uint8_t hpts_calling, doing_tlp = 0;
19638 uint32_t cwnd_to_use, pace_max_seg;
19639 int32_t do_a_prefetch = 0;
19640 int32_t prefetch_rsm = 0;
19641 int32_t orig_len = 0;
19643 int32_t prefetch_so_done = 0;
19644 struct tcp_log_buffer *lgb;
19645 struct inpcb *inp = tptoinpcb(tp);
19646 struct sockbuf *sb;
19647 uint64_t ts_val = 0;
19648 #ifdef TCP_ACCOUNTING
19652 struct ip6_hdr *ip6 = NULL;
19655 bool hw_tls = false;
19657 NET_EPOCH_ASSERT();
19658 INP_WLOCK_ASSERT(inp);
19660 /* setup and take the cache hits here */
19661 rack = (struct tcp_rack *)tp->t_fb_ptr;
19662 #ifdef TCP_ACCOUNTING
19664 ts_val = get_cyclecount();
19666 hpts_calling = inp->inp_hpts_calls;
19667 rack->rc_inp->inp_hpts_calls = 0;
19669 if (tp->t_flags & TF_TOE) {
19670 #ifdef TCP_ACCOUNTING
19673 return (tcp_offload_output(tp));
19676 if (rack->rack_deferred_inited == 0) {
19678 * If we are the connecting socket we will
19679 * hit rack_init() when no sequence numbers
19680 * are set up. This makes it so we must defer
19681 * some initialization. Call that now.
19683 rack_deferred_init(tp, rack);
19686 * For TFO connections in SYN_RECEIVED, only allow the initial
19687 * SYN|ACK and those sent by the retransmit timer.
19689 if (IS_FASTOPEN(tp->t_flags) &&
19690 (tp->t_state == TCPS_SYN_RECEIVED) &&
19691 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */
19692 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */
19693 #ifdef TCP_ACCOUNTING
19699 if (rack->r_state) {
19700 /* Use the cache line loaded if possible */
19701 isipv6 = rack->r_is_v6;
19703 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0;
19707 cts = tcp_get_usecs(&tv);
19708 ms_cts = tcp_tv_to_mssectick(&tv);
19709 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
19710 tcp_in_hpts(rack->rc_inp)) {
19712 * We are on the hpts for some timer but not hptsi output.
19713 * Remove from the hpts unconditionally.
19715 rack_timer_cancel(tp, rack, cts, __LINE__);
19717 /* Are we pacing and late? */
19718 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
19719 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) {
19720 /* We are delayed */
19721 delayed = cts - rack->r_ctl.rc_last_output_to;
19725 /* Do the timers, which may override the pacer */
19726 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
19729 retval = rack_process_timers(tp, rack, cts, hpts_calling,
19732 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1);
19733 #ifdef TCP_ACCOUNTING
19737 * If timers want tcp_drop(), then pass error out,
19738 * otherwise suppress it.
19740 return (retval < 0 ? retval : 0);
19743 if (rack->rc_in_persist) {
19744 if (tcp_in_hpts(rack->rc_inp) == 0) {
19745 /* Timer is not running */
19746 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
19748 #ifdef TCP_ACCOUNTING
19753 if ((rack->rc_ack_required == 1) &&
19754 (rack->r_timer_override == 0)){
19755 /* A timeout occurred and no ack has arrived */
19756 if (tcp_in_hpts(rack->rc_inp) == 0) {
19757 /* Timer is not running */
19758 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
19760 #ifdef TCP_ACCOUNTING
19765 if ((rack->r_timer_override) ||
19766 (rack->rc_ack_can_sendout_data) ||
19768 (tp->t_state < TCPS_ESTABLISHED)) {
19769 rack->rc_ack_can_sendout_data = 0;
19770 if (tcp_in_hpts(rack->rc_inp))
19771 tcp_hpts_remove(rack->rc_inp);
19772 } else if (tcp_in_hpts(rack->rc_inp)) {
19774 * On the hpts you can't pass even if ACKNOW is on; we will send
19775 * when the hpts fires.
19777 #ifdef TCP_ACCOUNTING
19778 crtsc = get_cyclecount();
19779 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19780 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val);
19782 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19783 tp->tcp_cnt_counters[SND_BLOCKED]++;
19787 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1);
19790 /* Finish out both pacing early and late accounting */
19791 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
19792 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) {
19793 early = rack->r_ctl.rc_last_output_to - cts;
19797 rack->r_ctl.rc_agg_delayed += delayed;
19799 } else if (early) {
19800 rack->r_ctl.rc_agg_early += early;
19803 /* Now that early/late accounting is done turn off the flag */
19804 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
19805 rack->r_wanted_output = 0;
19806 rack->r_timer_override = 0;
19807 if ((tp->t_state != rack->r_state) &&
19808 TCPS_HAVEESTABLISHED(tp->t_state)) {
19809 rack_set_state(tp, rack);
19811 if ((rack->r_fast_output) &&
19812 (doing_tlp == 0) &&
19813 (tp->rcv_numsacks == 0)) {
19817 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
19821 inp = rack->rc_inp;
19822 so = inp->inp_socket;
19827 inp = rack->rc_inp;
19829 * For TFO connections in SYN_SENT or SYN_RECEIVED,
19830 * only allow the initial SYN or SYN|ACK and those sent
19831 * by the retransmit timer.
19833 if (IS_FASTOPEN(tp->t_flags) &&
19834 ((tp->t_state == TCPS_SYN_RECEIVED) ||
19835 (tp->t_state == TCPS_SYN_SENT)) &&
19836 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */
19837 (tp->t_rxtshift == 0)) { /* not a retransmit */
19838 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
19839 so = inp->inp_socket;
19841 goto just_return_nolock;
19844 * Determine length of data that should be transmitted, and flags
19845 * that will be used. If there is some data or critical controls
19846 * (SYN, RST) to send, then transmit; otherwise, investigate
19849 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
19850 if (tp->t_idle_reduce) {
19851 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur))
19852 rack_cc_after_idle(rack, tp);
19854 tp->t_flags &= ~TF_LASTIDLE;
19856 if (tp->t_flags & TF_MORETOCOME) {
19857 tp->t_flags |= TF_LASTIDLE;
19861 if ((tp->snd_una == tp->snd_max) &&
19862 rack->r_ctl.rc_went_idle_time &&
19863 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) {
19864 idle = cts - rack->r_ctl.rc_went_idle_time;
19865 if (idle > rack_min_probertt_hold) {
19866 /* Count as a probe rtt */
19867 if (rack->in_probe_rtt == 0) {
19868 rack->r_ctl.rc_lower_rtt_us_cts = cts;
19869 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
19870 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
19871 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
19873 rack_exit_probertt(rack, cts);
19878 if (rack_use_fsb &&
19879 (rack->r_ctl.fsb.tcp_ip_hdr) &&
19880 (rack->r_fsb_inited == 0) &&
19881 (rack->r_state != TCPS_CLOSED))
19882 rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]);
19885 * If we've recently taken a timeout, snd_max will be greater than
19886 * snd_nxt. There may be SACK information that allows us to avoid
19887 * resending already delivered data. Adjust snd_nxt accordingly.
19890 cts = tcp_get_usecs(&tv);
19891 ms_cts = tcp_tv_to_mssectick(&tv);
19894 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
19896 if (rack->r_ctl.rc_pace_max_segs == 0)
19897 pace_max_seg = rack->rc_user_set_max_segs * segsiz;
19899 pace_max_seg = rack->r_ctl.rc_pace_max_segs;
19900 sb_offset = tp->snd_max - tp->snd_una;
19901 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
19902 flags = tcp_outflags[tp->t_state];
19903 while (rack->rc_free_cnt < rack_free_cache) {
19904 rsm = rack_alloc(rack);
19907 /* Retry in a ms */
19908 slot = (1 * HPTS_USEC_IN_MSEC);
19909 so = inp->inp_socket;
19911 goto just_return_nolock;
19913 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext);
19914 rack->rc_free_cnt++;
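/*
 * Pre-fill the free sendmap cache up to rack_free_cache entries so the
 * send path below can take entries without allocating; if an
 * allocation fails we simply retry output in 1ms rather than proceed
 * with no map entry available.
 */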
19920 if (flags & TH_RST) {
19921 SOCKBUF_LOCK(&inp->inp_socket->so_snd);
19922 so = inp->inp_socket;
19926 if (rack->r_ctl.rc_resend) {
19927 /* Retransmit timer */
19928 rsm = rack->r_ctl.rc_resend;
19929 rack->r_ctl.rc_resend = NULL;
19930 len = rsm->r_end - rsm->r_start;
19933 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
19934 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
19935 __func__, __LINE__,
19936 rsm->r_start, tp->snd_una, tp, rack, rsm));
19937 sb_offset = rsm->r_start - tp->snd_una;
19938 rack_validate_sizes(rack, &len, segsiz, pace_max_seg);
19939 } else if (rack->r_collapse_point_valid &&
19940 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) {
19942 * If an RSM is returned then enough time has passed
19943 * for us to retransmit it. Move up the collapse point,
19944 * since this rsm has its chance to retransmit now.
19946 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT);
19947 rack->r_ctl.last_collapse_point = rsm->r_end;
19949 if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
19950 rack->r_ctl.high_collapse_point))
19951 rack->r_collapse_point_valid = 0;
19953 /* We are not doing a TLP */
19955 len = rsm->r_end - rsm->r_start;
19956 sb_offset = rsm->r_start - tp->snd_una;
19958 rack_validate_sizes(rack, &len, segsiz, pace_max_seg);
19959 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) {
19960 /* We have a retransmit that takes precedence */
19961 if ((!IN_FASTRECOVERY(tp->t_flags)) &&
19962 ((rsm->r_flags & RACK_MUST_RXT) == 0) &&
19963 ((tp->t_flags & TF_WASFRECOVERY) == 0)) {
19964 /* Enter recovery if not induced by a time-out */
19965 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
19968 if (SEQ_LT(rsm->r_start, tp->snd_una)) {
19969 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
19970 tp, rack, rsm, rsm->r_start, tp->snd_una);
19973 len = rsm->r_end - rsm->r_start;
19974 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
19975 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
19976 __func__, __LINE__,
19977 rsm->r_start, tp->snd_una, tp, rack, rsm));
19978 sb_offset = rsm->r_start - tp->snd_una;
19980 rack_validate_sizes(rack, &len, segsiz, pace_max_seg);
19983 KMOD_TCPSTAT_INC(tcps_sack_rexmits);
19984 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
19987 } else if (rack->r_ctl.rc_tlpsend) {
19988 /* Tail loss probe */
19993 * Check if we can do a TLP with a RACK'd packet;
19994 * this can happen if we are not doing the rack
19995 * cheat and we skipped to a TLP and it
19998 rsm = rack->r_ctl.rc_tlpsend;
19999 /* We are doing a TLP, make sure the flag is present */
20000 rsm->r_flags |= RACK_TLP;
20001 rack->r_ctl.rc_tlpsend = NULL;
20003 tlen = rsm->r_end - rsm->r_start;
20006 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
20007 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
20008 __func__, __LINE__,
20009 rsm->r_start, tp->snd_una, tp, rack, rsm));
20010 sb_offset = rsm->r_start - tp->snd_una;
20011 cwin = min(tp->snd_wnd, tlen);
20014 if (rack->r_must_retran &&
20015 (doing_tlp == 0) &&
20016 (SEQ_GT(tp->snd_max, tp->snd_una)) &&
20019 * There are two different ways that we
20020 * can get into this block:
20021 * a) This is a non-sack connection, we had a time-out
20022 * and thus r_must_retran was set and everything
20023 * left outstanding has been marked for retransmit.
20024 * b) The MTU of the path shrank, so that everything
20025 * was marked to be retransmitted with the smaller
20026 * mtu and r_must_retran was set.
20028 * This means that we expect the sendmap (outstanding)
20029 * to all be marked must. We can use the tmap to
20033 int sendwin, flight;
20035 sendwin = min(tp->snd_wnd, tp->snd_cwnd);
20036 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto);
20037 if (flight >= sendwin) {
20039 * We can't send yet.
20041 so = inp->inp_socket;
20043 goto just_return_nolock;
20046 * This is the case a/b mentioned above. All
20047 * outstanding/not-acked should be marked.
20048 * We can use the tmap to find them.
20050 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
20053 rack->r_must_retran = 0;
20054 rack->r_ctl.rc_out_at_rto = 0;
20055 so = inp->inp_socket;
20057 goto just_return_nolock;
20059 if ((rsm->r_flags & RACK_MUST_RXT) == 0) {
20061 * The first one does not have the flag, did we collapse
20062 * further up in our list?
20064 rack->r_must_retran = 0;
20065 rack->r_ctl.rc_out_at_rto = 0;
20070 len = rsm->r_end - rsm->r_start;
20071 sb_offset = rsm->r_start - tp->snd_una;
20073 if ((rack->full_size_rxt == 0) &&
20074 (rack->shape_rxt_to_pacing_min == 0) &&
20077 else if (rack->shape_rxt_to_pacing_min &&
20079 /* We use pacing min as shaping len req */
20082 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz);
20087 * Delay removing the flag RACK_MUST_RXT so
20088 * that the fastpath for retransmit will
20089 * work with this rsm.
20094 * Enforce a connection sendmap count limit if set
20095 * as long as we are not retransmitting.
20097 if ((rsm == NULL) &&
20098 (rack->do_detection == 0) &&
20099 (V_tcp_map_entries_limit > 0) &&
20100 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
20101 counter_u64_add(rack_to_alloc_limited, 1);
20102 if (!rack->alloc_limit_reported) {
20103 rack->alloc_limit_reported = 1;
20104 counter_u64_add(rack_alloc_limited_conns, 1);
20106 so = inp->inp_socket;
20108 goto just_return_nolock;
20110 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) {
20111 /* we are retransmitting the fin */
20115 * When retransmitting data do *not* include the
20116 * FIN. This could happen from a TLP probe.
20121 if (rsm && rack->r_fsb_inited &&
20122 rack_use_rsm_rfo &&
20123 ((rsm->r_flags & RACK_HAS_FIN) == 0)) {
20126 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp);
20130 so = inp->inp_socket;
20132 if (do_a_prefetch == 0) {
20133 kern_prefetch(sb, &do_a_prefetch);
20136 #ifdef NETFLIX_SHARED_CWND
20137 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) &&
20138 rack->rack_enable_scwnd) {
20139 /* We are doing cwnd sharing */
20140 if (rack->gp_ready &&
20141 (rack->rack_attempted_scwnd == 0) &&
20142 (rack->r_ctl.rc_scw == NULL) &&
20144 /* The pcbid is in, lets make an attempt */
20145 counter_u64_add(rack_try_scwnd, 1);
20146 rack->rack_attempted_scwnd = 1;
20147 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp,
20148 &rack->r_ctl.rc_scw_index,
20151 if (rack->r_ctl.rc_scw &&
20152 (rack->rack_scwnd_is_idle == 1) &&
20153 sbavail(&so->so_snd)) {
20154 /* we are no longer out of data */
20155 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
20156 rack->rack_scwnd_is_idle = 0;
20158 if (rack->r_ctl.rc_scw) {
20159 /* First lets update and get the cwnd */
20160 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw,
20161 rack->r_ctl.rc_scw_index,
20162 tp->snd_cwnd, tp->snd_wnd, segsiz);
20167 * Get standard flags, and add SYN or FIN if requested by 'hidden'
20170 if (tp->t_flags & TF_NEEDFIN)
20172 if (tp->t_flags & TF_NEEDSYN)
20174 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) {
20176 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
20178 kern_prefetch(end_rsm, &prefetch_rsm);
20183 * If snd_nxt == snd_max and we have transmitted a FIN, the
20184 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a
20185 * negative length. This can also occur when TCP opens up its
20186 * congestion window while receiving additional duplicate acks after
20187 * fast-retransmit because TCP will reset snd_nxt to snd_max after
20188 * the fast-retransmit.
20190 * In the normal retransmit-FIN-only case, however, snd_nxt will be
20191 * set to snd_una, the sb_offset will be 0, and the length may wind
20194 * If sack_rxmit is true we are retransmitting from the scoreboard
20195 * in which case len is already set.
20197 if ((sack_rxmit == 0) &&
20198 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) {
20201 avail = sbavail(sb);
20202 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail)
20203 sb_offset = tp->snd_nxt - tp->snd_una;
20206 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) {
20207 if (rack->r_ctl.rc_tlp_new_data) {
20208 /* TLP is forcing out new data */
20209 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) {
20210 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset);
20212 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) {
20213 if (tp->snd_wnd > sb_offset)
20214 len = tp->snd_wnd - sb_offset;
20218 len = rack->r_ctl.rc_tlp_new_data;
20220 rack->r_ctl.rc_tlp_new_data = 0;
20222 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset);
20224 if ((rack->r_ctl.crte == NULL) &&
20225 IN_FASTRECOVERY(tp->t_flags) &&
20226 (rack->full_size_rxt == 0) &&
20227 (rack->shape_rxt_to_pacing_min == 0) &&
20230 * For prr=off, we need to send only 1 MSS
20231 * at a time. We do this because another sack could
20232 * be arriving that causes us to send retransmits and
20233 * we don't want to be on a long pace due to a larger send
20234 * that keeps us from sending out the retransmit.
20237 } else if (rack->shape_rxt_to_pacing_min &&
20239 /* We use pacing min as shaping len req */
20242 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz);
20245 }/* The else is full_size_rxt is on so send it all */
20247 uint32_t outstanding;
20249 * We are inside of a Fast recovery episode, this
20250 * is caused by a SACK or 3 dup acks. At this point
20251 * we have sent all the retransmissions and we rely
20252 * on PRR to dictate what we will send in the form of
20256 outstanding = tp->snd_max - tp->snd_una;
20257 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) {
20258 if (tp->snd_wnd > outstanding) {
20259 len = tp->snd_wnd - outstanding;
20260 /* Check to see if we have the data */
20261 if ((sb_offset + len) > avail) {
20262 /* It does not all fit */
20263 if (avail > sb_offset)
20264 len = avail - sb_offset;
20271 } else if (avail > sb_offset) {
20272 len = avail - sb_offset;
20277 if (len > rack->r_ctl.rc_prr_sndcnt) {
20278 len = rack->r_ctl.rc_prr_sndcnt;
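/*
 * PRR bookkeeping: new data during fast recovery is limited to
 * rc_prr_sndcnt (the proportional rate reduction budget), after first
 * being clipped to what the receive window and socket buffer can
 * actually supply.
 */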
20284 if (len > segsiz) {
20286 * We should never send more than a MSS when
20287 * retransmitting or sending new data in prr
20288 * mode unless the override flag is on. Most
20289 * likely the PRR algorithm is not going to
20290 * let us send a lot as well :-)
20292 if (rack->r_ctl.rc_prr_sendalot == 0) {
20295 } else if (len < segsiz) {
20297 * Do we send any? The idea here is if the
20298 * send empties the socket buffer we want to
20299 * do it. However if not then let's just wait
20300 * for our prr_sndcnt to get bigger.
20304 leftinsb = sbavail(sb) - sb_offset;
20305 if (leftinsb > len) {
20306 /* This send does not empty the sb */
20311 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) {
20313 * If you have not established
20314 * and are not doing FAST OPEN
20317 if ((sack_rxmit == 0) &&
20318 (!IS_FASTOPEN(tp->t_flags))){
20323 if (prefetch_so_done == 0) {
20324 kern_prefetch(so, &prefetch_so_done);
20325 prefetch_so_done = 1;
20328 * Lop off SYN bit if it has already been sent. However, if this is
20329 * SYN-SENT state and if segment contains data and if we don't know
20330 * that foreign host supports TAO, suppress sending segment.
20332 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) &&
20333 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) {
20335 * When sending additional segments following a TFO SYN|ACK,
20336 * do not include the SYN bit.
20338 if (IS_FASTOPEN(tp->t_flags) &&
20339 (tp->t_state == TCPS_SYN_RECEIVED))
20343 * Be careful not to send data and/or FIN on SYN segments. This
20344 * measure is needed to prevent interoperability problems with not
20345 * fully conformant TCP implementations.
20347 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
20352 * On TFO sockets, ensure no data is sent in the following cases:
20354 * - When retransmitting SYN|ACK on a passively-created socket
20356 * - When retransmitting SYN on an actively created socket
20358 * - When sending a zero-length cookie (cookie request) on an
20359 * actively created socket
20361 * - When the socket is in the CLOSED state (RST is being sent)
20363 if (IS_FASTOPEN(tp->t_flags) &&
20364 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
20365 ((tp->t_state == TCPS_SYN_SENT) &&
20366 (tp->t_tfo_client_cookie_len == 0)) ||
20367 (flags & TH_RST))) {
20371 /* Without fast-open there should never be data sent on a SYN */
20372 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) {
20373 tp->snd_nxt = tp->iss;
20376 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) {
20377 /* We only send 1 MSS if we have a DSACK block */
20378 add_flag |= RACK_SENT_W_DSACK;
20384 * If FIN has been sent but not acked, but we haven't been
20385 * called to retransmit, len will be < 0. Otherwise, window
20386 * shrank after we sent into it. If window shrank to 0,
20387 * cancel pending retransmit, pull snd_nxt back to (closed)
20388 * window, and set the persist timer if it isn't already
20389 * going. If the window didn't close completely, just wait
20392 * We also do a general check here to ensure that we will
20393 * set the persist timer when we have data to send, but a
20394 * 0-byte window. This makes sure the persist timer is set
20395 * even if the packet hits one of the "goto send" lines
20399 if ((tp->snd_wnd == 0) &&
20400 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
20401 (tp->snd_una == tp->snd_max) &&
20402 (sb_offset < (int)sbavail(sb))) {
20403 rack_enter_persist(tp, rack, cts, tp->snd_una);
20405 } else if ((rsm == NULL) &&
20406 (doing_tlp == 0) &&
20407 (len < pace_max_seg)) {
20409 * We are not sending a maximum sized segment for
20410 * some reason. Should we not send anything (think
20411 * sws or persists)?
20413 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
20414 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
20416 (len < (int)(sbavail(sb) - sb_offset))) {
20418 * Here the rwnd is less than
20419 * the minimum pacing size, this is not a retransmit,
20420 * we are established and
20421 * the send is not the last in the socket buffer:
20422 * we send nothing, and we may enter persists
20423 * if nothing is outstanding.
20426 if (tp->snd_max == tp->snd_una) {
20428 * Nothing is outstanding; we can
20429 * go into persists.
20431 rack_enter_persist(tp, rack, cts, tp->snd_una);
20433 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) &&
20434 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
20435 (len < (int)(sbavail(sb) - sb_offset)) &&
20438 * Here we are not retransmitting, and
20439 * the cwnd is not so small that we could
20440 * not send at least a min size (rxt timer
20441 * not having gone off), we have 2 segments or
20442 * more already in flight, it's not the tail end
20443 * of the socket buffer, and the cwnd is blocking
20444 * us from sending out a minimum pacing segment size.
20445 * Let's not send anything.
20448 } else if (((tp->snd_wnd - ctf_outstanding(tp)) <
20449 min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
20450 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
20451 (len < (int)(sbavail(sb) - sb_offset)) &&
20452 (TCPS_HAVEESTABLISHED(tp->t_state))) {
20454 * Here we have a send window but we have
20455 * filled it up and we can't send another pacing segment.
20456 * We also have in flight more than 2 segments
20457 * and we are not completing the sb, i.e. we allow
20458 * the last bytes of the sb to go out even if
20459 * it's not a full pacing segment.
20462 } else if ((rack->r_ctl.crte != NULL) &&
20463 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) &&
20464 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) &&
20465 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) &&
20466 (len < (int)(sbavail(sb) - sb_offset))) {
20468 * Here we are doing hardware pacing, this is not a TLP,
20469 * we are not sending a pace max segment size, there is rwnd
20470 * room to send at least N pace_max_seg, the cwnd is greater
20471 * than or equal to a full pacing segment plus 4 mss, and we have 2 or
20472 * more segments in flight and it's not the tail of the socket buffer.
20474 * We don't want to send; instead we need to get more acks in to
20475 * allow us to send a full pacing segment. Normally, if we are pacing
20476 * at about the right speed, we should have finished our pacing
20477 * send as most of the acks have come back if we are at the
20478 * right rate. This is a bit fuzzy since return path delay
20479 * can delay the acks, which is why we want to make sure we
20480 * have cwnd space to have a bit more than a max pace segment in flight.
20482 * If we have not gotten our acks back we are pacing at too high a
20483 * rate; delaying will not hurt and will bring our GP estimate down by
20484 * injecting the delay. If we don't do this we will send
20485 * 2 MSS out in response to the acks being clocked in, which
20486 * defeats the point of hw-pacing (i.e. to help us get
20487 * larger TSOs out).
20493 /* len will be >= 0 after this point. */
20494 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
20495 rack_sndbuf_autoscale(rack);
20497 * Decide if we can use TCP Segmentation Offloading (if supported by
20500 * TSO may only be used if we are in a pure bulk sending state. The
20501 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
20502 * options prevent using TSO. With TSO the TCP header is the same
20503 * (except for the sequence number) for all generated packets. This
20504 * makes it impossible to transmit any options which vary per
20505 * generated segment or packet.
20507 * IPv4 handling has a clear separation of ip options and ip header
20508 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
20509 * the right thing below to provide length of just ip options and thus
20510 * checking for ipoptlen is enough to decide if ip options are present.
20513 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
20515 * Pre-calculate here as we save another lookup into the darknesses
20516 * of IPsec that way and can actually decide if TSO is ok.
20519 if (isipv6 && IPSEC_ENABLED(ipv6))
20520 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp);
20526 if (IPSEC_ENABLED(ipv4))
20527 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp);
20531 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
20532 ipoptlen += ipsec_optlen;
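/*
 * Summary of the TSO eligibility test below: the connection and sysctl
 * must allow TSO, the payload must exceed one segment, and there must be
 * no UDP tunneling port, no TCP-MD5 signature, no SACK blocks to
 * advertise and no SACK retransmission in progress, since any of those
 * would require per-segment header or option differences.
 */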
20534 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz &&
20535 (tp->t_port == 0) &&
20536 ((tp->t_flags & TF_SIGNATURE) == 0) &&
20537 tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
20541 uint32_t outstanding __unused;
20543 outstanding = tp->snd_max - tp->snd_una;
20544 if (tp->t_flags & TF_SENTFIN) {
20546 * If we sent a fin, snd_max is 1 higher than
20552 if ((rsm->r_flags & RACK_HAS_FIN) == 0)
20555 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
20560 recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
20561 (long)TCP_MAXWIN << tp->rcv_scale);
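/*
 * recwin is the free space in the receive socket buffer, clamped to the
 * largest window we can advertise under the negotiated window scale
 * (TCP_MAXWIN << rcv_scale).
 */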
20564 * Sender silly window avoidance. We transmit under the following
20565 * conditions when len is non-zero:
20567 * - We have a full segment (or more with TSO) - This is the last
20568 * buffer in a write()/send() and we are either idle or running
20569 * NODELAY - we've timed out (e.g. persist timer) - we have more
20570 * than 1/2 the maximum send window's worth of data (receiver may be
20571 * limiting the window size) - we need to retransmit
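/*
 * Illustrative example (assuming a 1448-byte MSS): a 4000-byte write is
 * sent because len >= segsiz; a 300-byte write that empties the socket
 * buffer goes out right away when the connection is idle or NODELAY is
 * set; the same 300 bytes are held back, waiting to be coalesced, when
 * unacked data is already outstanding and NODELAY is off.
 */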
20574 if (len >= segsiz) {
20578 * NOTE! on localhost connections an 'ack' from the remote
20579 * end may occur synchronously with the output and cause us
20580 * to flush a buffer queued with moretocome. XXX
20583 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */
20584 (idle || (tp->t_flags & TF_NODELAY)) &&
20585 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
20586 (tp->t_flags & TF_NOPUSH) == 0) {
20590 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */
20594 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
20598 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */
20606 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) &&
20607 (ctf_outstanding(tp) < (segsiz * 2))) {
20609 * We have less than two MSS outstanding (delayed ack)
20610 * and our rwnd will not let us send a full sized
20611 * MSS. Let's go ahead and let this small segment
20612 * out because we want to try to have at least two
20613 * packets inflight to not be caught by delayed ack.
20620 * Sending of standalone window updates.
20622 * Window updates are important when we close our window due to a
20623 * full socket buffer and are opening it again after the application
20624 * reads data from it. Once the window has opened again and the
20625 * remote end starts to send again the ACK clock takes over and
20626 * provides the most current window information.
20628 * We must avoid the silly window syndrome whereby every read from
20629 * the receive buffer, no matter how small, causes a window update
20630 * to be sent. We also should avoid sending a flurry of window
20631 * updates when the socket buffer had queued a lot of data and the
20632 * application is doing small reads.
20634 * Prevent a flurry of pointless window updates by only sending an
20635 * update when we can increase the advertised window by more than
20636 * 1/4th of the socket buffer capacity. When the buffer is getting
20637 * full or is very small be more aggressive and send an update
20638 * whenever we can increase by two mss sized segments. In all other
20639 * situations the ACK's to new incoming data will carry further
20640 * window increases.
20642 * Don't send an independent window update if a delayed ACK is
20643 * pending (it will get piggy-backed on it) or the remote side
20644 * already has done a half-close and won't send more data. Skip
20645 * this if the connection is in T/TCP half-open state.
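/*
 * Illustrative numbers for the policy above: with a 64 KB receive buffer
 * a standalone update goes out once the advertised window can grow by
 * more than 16 KB (1/4 of capacity), while a nearly full or very small
 * buffer only needs to open by two full segments to trigger one.
 */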
20647 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
20648 !(tp->t_flags & TF_DELACK) &&
20649 !TCPS_HAVERCVDFIN(tp->t_state)) {
20651 * "adv" is the amount we could increase the window, taking
20652 * into account that we are limited by TCP_MAXWIN << tp->rcv_scale.
20659 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
20660 oldwin = (tp->rcv_adv - tp->rcv_nxt);
20664 /* We can't increase the window */
20671 * If the new window size ends up being the same as or less
20672 * than the old size when it is scaled, then don't force a window update.
20675 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
20678 if (adv >= (int32_t)(2 * segsiz) &&
20679 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
20680 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) ||
20681 so->so_rcv.sb_hiwat <= 8 * segsiz)) {
20685 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) {
20693 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW
20694 * is also a catch-all for the retransmit timer timeout case.
20696 if (tp->t_flags & TF_ACKNOW) {
20700 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) {
20705 * If our state indicates that FIN should be sent and we have not
20706 * yet done so, then we need to send.
20708 if ((flags & TH_FIN) &&
20709 (tp->snd_nxt == tp->snd_una)) {
20714 * No reason to send a segment, just return.
20717 SOCKBUF_UNLOCK(sb);
20718 just_return_nolock:
20720 int app_limited = CTF_JR_SENT_DATA;
20722 if (tot_len_this_send > 0) {
20723 /* Make sure snd_nxt is up to max */
20724 rack->r_ctl.fsb.recwin = recwin;
20725 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz);
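/*
 * The condition list below gates the "fast send block" (fsb) path: plain
 * established-state data with timestamp-only options, no SACK state, not
 * in recovery and at least one more full segment left over, so a later
 * send can reuse the prebuilt header template via rack_fast_output()
 * instead of walking this full output path again.
 */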
20726 if ((error == 0) &&
20728 ((flags & (TH_SYN|TH_FIN)) == 0) &&
20730 (tp->snd_nxt == tp->snd_max) &&
20731 (tp->rcv_numsacks == 0) &&
20732 rack->r_fsb_inited &&
20733 TCPS_HAVEESTABLISHED(tp->t_state) &&
20734 ((IN_RECOVERY(tp->t_flags)) == 0) &&
20735 (rack->r_must_retran == 0) &&
20736 ((tp->t_flags & TF_NEEDFIN) == 0) &&
20737 (len > 0) && (orig_len > 0) &&
20738 (orig_len > len) &&
20739 ((orig_len - len) >= segsiz) &&
20741 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
20742 /* We can send at least one more MSS using our fsb */
20743 rack_setup_fast_output(tp, rack, sb, len, orig_len,
20744 segsiz, pace_max_seg, hw_tls, flags);
20746 rack->r_fast_output = 0;
20749 rack_log_fsb(rack, tp, so, flags,
20750 ipoptlen, orig_len, len, 0,
20751 1, optlen, __LINE__, 1);
20752 if (SEQ_GT(tp->snd_max, tp->snd_nxt))
20753 tp->snd_nxt = tp->snd_max;
20755 int end_window = 0;
20756 uint32_t seq = tp->gput_ack;
20758 rsm = tqhash_max(rack->r_ctl.tqh);
20761 * Mark the last segment sent as just-returned (hinting
20762 * that delayed ack may play a role in any rtt measurement).
20764 rsm->r_just_ret = 1;
20766 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1);
20767 rack->r_ctl.rc_agg_delayed = 0;
20770 rack->r_ctl.rc_agg_early = 0;
20771 if ((ctf_outstanding(tp) +
20772 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)),
20773 minseg)) >= tp->snd_wnd) {
20774 /* We are limited by the rwnd */
20775 app_limited = CTF_JR_RWND_LIMITED;
20776 if (IN_FASTRECOVERY(tp->t_flags))
20777 rack->r_ctl.rc_prr_sndcnt = 0;
20778 } else if (ctf_outstanding(tp) >= sbavail(sb)) {
20779 /* We are limited by what's available -- app limited */
20780 app_limited = CTF_JR_APP_LIMITED;
20781 if (IN_FASTRECOVERY(tp->t_flags))
20782 rack->r_ctl.rc_prr_sndcnt = 0;
20783 } else if ((idle == 0) &&
20784 ((tp->t_flags & TF_NODELAY) == 0) &&
20785 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
20788 * NODELAY is not on and the
20789 * user is sending less than 1 MSS. This
20790 * brings out SWS avoidance so we
20791 * don't send. Another app-limited case.
20793 app_limited = CTF_JR_APP_LIMITED;
20794 } else if (tp->t_flags & TF_NOPUSH) {
20796 * The user has requested no push of
20797 * the last segment and we are
20798 * at the last segment. Another app-limited case.
20801 app_limited = CTF_JR_APP_LIMITED;
20802 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) {
20804 app_limited = CTF_JR_CWND_LIMITED;
20805 } else if (IN_FASTRECOVERY(tp->t_flags) &&
20806 (rack->rack_no_prr == 0) &&
20807 (rack->r_ctl.rc_prr_sndcnt < segsiz)) {
20808 app_limited = CTF_JR_PRR;
20810 /* Now, why are we not sending here? */
20813 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
20816 app_limited = CTF_JR_ASSESSING;
20819 * App limited in some fashion, for our pacing GP
20820 * measurements we don't want any gap (even cwnd).
20821 * Close down the measurement window.
20823 if (rack_cwnd_block_ends_measure &&
20824 ((app_limited == CTF_JR_CWND_LIMITED) ||
20825 (app_limited == CTF_JR_PRR))) {
20827 * The reason we are not sending is
20828 * the cwnd (or prr). We have been configured
20829 * to end the measurement window in this case.
20833 } else if (rack_rwnd_block_ends_measure &&
20834 (app_limited == CTF_JR_RWND_LIMITED)) {
20836 * We are rwnd limited and have been
20837 * configured to end the measurement
20838 * window in this case.
20841 } else if (app_limited == CTF_JR_APP_LIMITED) {
20843 * A true application limited period; we have run out of data.
20847 } else if (app_limited == CTF_JR_ASSESSING) {
20849 * In the assessing case we hit the end of
20850 * the if/else and had no known reason.
20851 * This will panic us under invariants.
20853 * If we get this out in logs we need to
20854 * investigate which reason we missed.
20861 /* Adjust the Gput measurement */
20862 if ((tp->t_flags & TF_GPUTINPROG) &&
20863 SEQ_GT(tp->gput_ack, tp->snd_max)) {
20864 tp->gput_ack = tp->snd_max;
20865 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
20867 * There is not enough to measure.
20869 tp->t_flags &= ~TF_GPUTINPROG;
20870 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
20871 rack->r_ctl.rc_gp_srtt /*flex1*/,
20873 0, 0, 18, __LINE__, NULL, 0);
20877 /* Mark the last packet as app limited */
20878 rsm = tqhash_max(rack->r_ctl.tqh);
20879 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
20880 if (rack->r_ctl.rc_app_limited_cnt == 0)
20881 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
20884 * Go out to the end app-limited rsm, mark
20885 * this new one as next, and move the end_appl up to it.
20888 if (rack->r_ctl.rc_end_appl)
20889 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
20890 rack->r_ctl.rc_end_appl = rsm;
20892 rsm->r_flags |= RACK_APP_LIMITED;
20893 rack->r_ctl.rc_app_limited_cnt++;
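/*
 * App-limited rsms are chained together (rc_first_appl .. rc_end_appl,
 * linked through r_nseq_appl), presumably so that goodput measurements
 * can later discount stretches where the application, rather than the
 * network, limited what was sent.
 */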
20896 rack_log_pacing_delay_calc(rack,
20897 rack->r_ctl.rc_app_limited_cnt, seq,
20898 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0);
20901 /* Check if we need to go into persists or not */
20902 if ((tp->snd_max == tp->snd_una) &&
20903 TCPS_HAVEESTABLISHED(tp->t_state) &&
20905 (sbavail(sb) > tp->snd_wnd) &&
20906 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) {
20907 /* Yes, let's make sure to move to persist before timer-start */
20908 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una);
20910 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack);
20911 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use);
20913 #ifdef NETFLIX_SHARED_CWND
20914 if ((sbavail(sb) == 0) &&
20915 rack->r_ctl.rc_scw) {
20916 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
20917 rack->rack_scwnd_is_idle = 1;
20920 #ifdef TCP_ACCOUNTING
20921 if (tot_len_this_send > 0) {
20922 crtsc = get_cyclecount();
20923 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
20924 tp->tcp_cnt_counters[SND_OUT_DATA]++;
20926 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
20927 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
20929 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
20930 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz);
20933 crtsc = get_cyclecount();
20934 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
20935 tp->tcp_cnt_counters[SND_LIMITED]++;
20937 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
20938 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val);
20946 if ((rack->r_ctl.crte != NULL) &&
20948 ((rack->rc_hw_nobuf == 1) ||
20949 (rack_hw_check_queue && (check_done == 0)))) {
20951 * We only want to do this once with the hw_check_queue;
20952 * for the enobuf case we would also only do it once, since if
20953 * we come around again, the flag will be clear.
20956 slot = rack_check_queue_level(rack, tp, &tv, cts, len, segsiz);
20958 rack->r_ctl.rc_agg_delayed = 0;
20959 rack->r_ctl.rc_agg_early = 0;
20962 SOCKBUF_UNLOCK(&so->so_snd);
20963 goto skip_all_send;
20966 if (rsm || sack_rxmit)
20967 counter_u64_add(rack_nfto_resend, 1);
20969 counter_u64_add(rack_non_fto_send, 1);
20970 if ((flags & TH_FIN) &&
20973 * We do not transmit a FIN
20974 * with data outstanding. We
20975 * need to make it so all data is acked first.
20980 /* Enforce stack imposed max seg size if we have one */
20981 if (rack->r_ctl.rc_pace_max_segs &&
20982 (len > rack->r_ctl.rc_pace_max_segs)) {
20984 len = rack->r_ctl.rc_pace_max_segs;
20986 SOCKBUF_LOCK_ASSERT(sb);
20989 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
20991 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
20994 * Before ESTABLISHED, force sending of initial options unless TCP
20995 * set not to do any options. NOTE: we assume that the IP/TCP header
20996 * plus TCP options always fit in a single mbuf, leaving room for a
20997 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr)
20998 * + optlen <= MCLBYTES
21003 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
21006 hdrlen = sizeof(struct tcpiphdr);
21009 * Compute options for segment. We only have to care about SYN and
21010 * established connection segments. Options for SYN-ACK segments
21011 * are handled in TCP syncache.
21014 if ((tp->t_flags & TF_NOOPT) == 0) {
21015 /* Maximum segment size. */
21016 if (flags & TH_SYN) {
21017 tp->snd_nxt = tp->iss;
21018 to.to_mss = tcp_mssopt(&inp->inp_inc);
21020 to.to_mss -= V_tcp_udp_tunneling_overhead;
21021 to.to_flags |= TOF_MSS;
21024 * On SYN or SYN|ACK transmits on TFO connections,
21025 * only include the TFO option if it is not a
21026 * retransmit, as the presence of the TFO option may
21027 * have caused the original SYN or SYN|ACK to have
21028 * been dropped by a middlebox.
21030 if (IS_FASTOPEN(tp->t_flags) &&
21031 (tp->t_rxtshift == 0)) {
21032 if (tp->t_state == TCPS_SYN_RECEIVED) {
21033 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
21035 (u_int8_t *)&tp->t_tfo_cookie.server;
21036 to.to_flags |= TOF_FASTOPEN;
21038 } else if (tp->t_state == TCPS_SYN_SENT) {
21040 tp->t_tfo_client_cookie_len;
21042 tp->t_tfo_cookie.client;
21043 to.to_flags |= TOF_FASTOPEN;
21046 * If we wind up having more data to
21047 * send with the SYN than can fit in
21048 * one segment, don't send any more
21049 * until the SYN|ACK comes back from the other end.
21056 /* Window scaling. */
21057 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
21058 to.to_wscale = tp->request_r_scale;
21059 to.to_flags |= TOF_SCALE;
21062 if ((tp->t_flags & TF_RCVD_TSTMP) ||
21063 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
21064 to.to_tsval = ms_cts + tp->ts_offset;
21065 to.to_tsecr = tp->ts_recent;
21066 to.to_flags |= TOF_TS;
21068 /* Set receive buffer autosizing timestamp. */
21069 if (tp->rfbuf_ts == 0 &&
21070 (so->so_rcv.sb_flags & SB_AUTOSIZE))
21071 tp->rfbuf_ts = tcp_ts_getticks();
21072 /* Selective ACK's. */
21073 if (tp->t_flags & TF_SACK_PERMIT) {
21074 if (flags & TH_SYN)
21075 to.to_flags |= TOF_SACKPERM;
21076 else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
21077 tp->rcv_numsacks > 0) {
21078 to.to_flags |= TOF_SACK;
21079 to.to_nsacks = tp->rcv_numsacks;
21080 to.to_sacks = (u_char *)tp->sackblks;
21083 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
21084 /* TCP-MD5 (RFC2385). */
21085 if (tp->t_flags & TF_SIGNATURE)
21086 to.to_flags |= TOF_SIGNATURE;
21087 #endif /* TCP_SIGNATURE */
21089 /* Processing the options. */
21090 hdrlen += optlen = tcp_addoptions(&to, opt);
21092 * If we wanted a TFO option to be added, but it was unable
21093 * to fit, ensure no data is sent.
21095 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie &&
21096 !(to.to_flags & TOF_FASTOPEN))
21100 if (V_tcp_udp_tunneling_port == 0) {
21101 /* The port was removed?? */
21102 SOCKBUF_UNLOCK(&so->so_snd);
21103 #ifdef TCP_ACCOUNTING
21104 crtsc = get_cyclecount();
21105 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
21106 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
21108 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
21109 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
21113 return (EHOSTUNREACH);
21115 hdrlen += sizeof(struct udphdr);
21119 ipoptlen = ip6_optlen(inp);
21122 if (inp->inp_options)
21123 ipoptlen = inp->inp_options->m_len -
21124 offsetof(struct ipoption, ipopt_list);
21127 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
21128 ipoptlen += ipsec_optlen;
21132 * Adjust data length if insertion of options will bump the packet
21133 * length beyond the t_maxseg length. Clear the FIN bit because we
21134 * cut off the tail of the segment.
21136 if (len + optlen + ipoptlen > tp->t_maxseg) {
21138 uint32_t if_hw_tsomax;
21142 /* extract TSO information */
21143 if_hw_tsomax = tp->t_tsomax;
21144 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
21145 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
21146 KASSERT(ipoptlen == 0,
21147 ("%s: TSO can't do IP options", __func__));
21150 * Check if we should limit by maximum payload length:
21153 if (if_hw_tsomax != 0) {
21154 /* compute maximum TSO length */
21155 max_len = (if_hw_tsomax - hdrlen -
21157 if (max_len <= 0) {
21159 } else if (len > max_len) {
21166 * Prevent the last segment from being fractional
21167 * unless the send sockbuf can be emptied:
21169 max_len = (tp->t_maxseg - optlen);
21170 if ((sb_offset + len) < sbavail(sb)) {
21171 moff = len % (u_int)max_len;
21178 * In case there are too many small fragments, don't use TSO:
21181 if (len <= max_len) {
21186 * Send the FIN in a separate segment after the bulk
21187 * sending is done. We don't trust the TSO
21188 * implementations to clear the FIN flag on all but
21189 * the last segment.
21191 if (tp->t_flags & TF_NEEDFIN) {
21196 if (optlen + ipoptlen >= tp->t_maxseg) {
21198 * Since we don't have enough space to put
21199 * the IP header chain and the TCP header in
21200 * one packet as required by RFC 7112, don't
21201 * send it. Also ensure that at least one
21202 * byte of the payload can be put into the TCP segment.
21205 SOCKBUF_UNLOCK(&so->so_snd);
21210 len = tp->t_maxseg - optlen - ipoptlen;
21217 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
21218 ("%s: len > IP_MAXPACKET", __func__));
21221 if (max_linkhdr + hdrlen > MCLBYTES)
21223 if (max_linkhdr + hdrlen > MHLEN)
21225 panic("tcphdr too big");
21229 * This KASSERT is here to catch edge cases at a well defined place.
21230 * Before, those had triggered (random) panic conditions further down in the code.
21233 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
21235 (flags & TH_FIN) &&
21238 * We have outstanding data; don't send a FIN by itself!
21243 * Grab a header mbuf, attaching a copy of data to be transmitted,
21244 * and initialize the header from the template for sends on this
21247 hw_tls = tp->t_nic_ktls_xmit != 0;
21252 if (rack->r_ctl.rc_pace_max_segs)
21253 max_val = rack->r_ctl.rc_pace_max_segs;
21254 else if (rack->rc_user_set_max_segs)
21255 max_val = rack->rc_user_set_max_segs * segsiz;
21259 * We allow a limit on sending with hptsi.
21261 if (len > max_val) {
21266 if (MHLEN < hdrlen + max_linkhdr)
21267 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
21270 m = m_gethdr(M_NOWAIT, MT_DATA);
21273 SOCKBUF_UNLOCK(sb);
21278 m->m_data += max_linkhdr;
21282 * Start the m_copy functions from the closest mbuf to the
21283 * sb_offset in the socket buffer chain.
21285 mb = sbsndptr_noadv(sb, sb_offset, &moff);
21288 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) {
21289 m_copydata(mb, moff, (int)len,
21290 mtod(m, caddr_t)+hdrlen);
21291 if (SEQ_LT(tp->snd_nxt, tp->snd_max))
21292 sbsndptr_adv(sb, mb, len);
21295 struct sockbuf *msb;
21297 if (SEQ_LT(tp->snd_nxt, tp->snd_max))
21301 m->m_next = tcp_m_copym(
21303 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb,
21304 ((rsm == NULL) ? hw_tls : 0)
21305 #ifdef NETFLIX_COPY_ARGS
21309 if (len <= (tp->t_maxseg - optlen)) {
21311 * Must have run out of mbufs for the copy;
21312 * shorten it to no longer need tso. Let's
21313 * not put on sendalot since we are low on mbufs.
21318 if (m->m_next == NULL) {
21319 SOCKBUF_UNLOCK(sb);
21326 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
21327 if (rsm && (rsm->r_flags & RACK_TLP)) {
21329 * TLP should not count in retran count, but in its own bin.
21332 counter_u64_add(rack_tlp_retran, 1);
21333 counter_u64_add(rack_tlp_retran_bytes, len);
21335 tp->t_sndrexmitpack++;
21336 KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
21337 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
21340 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
21344 KMOD_TCPSTAT_INC(tcps_sndpack);
21345 KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
21347 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
21352 * If we're sending everything we've got, set PUSH. (This
21353 * will keep happy those implementations which only give
21354 * data to the user when a buffer fills or a PUSH comes in.)
21356 if (sb_offset + len == sbused(sb) &&
21358 !(flags & TH_SYN)) {
21360 add_flag |= RACK_HAD_PUSH;
21363 SOCKBUF_UNLOCK(sb);
21365 SOCKBUF_UNLOCK(sb);
21366 if (tp->t_flags & TF_ACKNOW)
21367 KMOD_TCPSTAT_INC(tcps_sndacks);
21368 else if (flags & (TH_SYN | TH_FIN | TH_RST))
21369 KMOD_TCPSTAT_INC(tcps_sndctrl);
21371 KMOD_TCPSTAT_INC(tcps_sndwinup);
21373 m = m_gethdr(M_NOWAIT, MT_DATA);
21380 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
21382 M_ALIGN(m, hdrlen);
21385 m->m_data += max_linkhdr;
21388 SOCKBUF_UNLOCK_ASSERT(sb);
21389 m->m_pkthdr.rcvif = (struct ifnet *)0;
21391 mac_inpcb_create_mbuf(inp, m);
21393 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
21396 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
21400 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
21402 th = rack->r_ctl.fsb.th;
21403 udp = rack->r_ctl.fsb.udp;
21407 ulen = hdrlen + len - sizeof(struct ip6_hdr);
21410 ulen = hdrlen + len - sizeof(struct ip);
21411 udp->uh_ulen = htons(ulen);
21416 ip6 = mtod(m, struct ip6_hdr *);
21418 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
21419 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
21420 udp->uh_dport = tp->t_port;
21421 ulen = hdrlen + len - sizeof(struct ip6_hdr);
21422 udp->uh_ulen = htons(ulen);
21423 th = (struct tcphdr *)(udp + 1);
21425 th = (struct tcphdr *)(ip6 + 1);
21426 tcpip_fillheaders(inp, tp->t_port, ip6, th);
21431 ip = mtod(m, struct ip *);
21433 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
21434 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
21435 udp->uh_dport = tp->t_port;
21436 ulen = hdrlen + len - sizeof(struct ip);
21437 udp->uh_ulen = htons(ulen);
21438 th = (struct tcphdr *)(udp + 1);
21440 th = (struct tcphdr *)(ip + 1);
21441 tcpip_fillheaders(inp, tp->t_port, ip, th);
21446 * Fill in fields, remembering maximum advertised window for use in
21447 * delaying messages about window sizes. If resending a FIN, be sure
21448 * not to use a new sequence number.
21450 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
21451 tp->snd_nxt == tp->snd_max)
21454 * If we are starting a connection, send ECN setup SYN packet. If we
21455 * are on a retransmit, we may resend those bits a number of times as per RFC 3168.
21458 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) {
21459 flags |= tcp_ecn_output_syn_sent(tp);
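/*
 * tcp_ecn_output_syn_sent() supplies the ECN negotiation flags to OR
 * into the SYN (ECE|CWR for classic ECN, or the ACE bits when Accurate
 * ECN is configured); the established-state helper below likewise picks
 * the ECT codepoint to place in the IP header for data segments.
 */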
21461 /* Also handle parallel SYN for ECN */
21462 if (TCPS_HAVERCVDSYN(tp->t_state) &&
21463 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
21464 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit);
21465 if ((tp->t_state == TCPS_SYN_RECEIVED) &&
21466 (tp->t_flags2 & TF2_ECN_SND_ECE))
21467 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
21470 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
21471 ip6->ip6_flow |= htonl(ect << 20);
21477 ip->ip_tos &= ~IPTOS_ECN_MASK;
21483 * If we are doing retransmissions, then snd_nxt will not reflect
21484 * the first unsent octet. For ACK only packets, we do not want the
21485 * sequence number of the retransmitted packet, we want the sequence
21486 * number of the next unsent octet. So, if there is no data (and no
21487 * SYN or FIN), use snd_max instead of snd_nxt when filling in
21488 * ti_seq. But if we are in persist state, snd_max might reflect
21489 * one byte beyond the right edge of the window, so use snd_nxt in
21490 * that case, since we know we aren't doing a retransmission.
21491 * (retransmit and persist are mutually exclusive...)
21493 if (sack_rxmit == 0) {
21494 if (len || (flags & (TH_SYN | TH_FIN))) {
21495 th->th_seq = htonl(tp->snd_nxt);
21496 rack_seq = tp->snd_nxt;
21498 th->th_seq = htonl(tp->snd_max);
21499 rack_seq = tp->snd_max;
21502 th->th_seq = htonl(rsm->r_start);
21503 rack_seq = rsm->r_start;
21505 th->th_ack = htonl(tp->rcv_nxt);
21506 tcp_set_flags(th, flags);
21508 * Calculate receive window. Don't shrink window, but avoid silly window syndrome.
21510 * If a RST segment is sent, advertise a window of zero.
21512 if (flags & TH_RST) {
21515 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
21516 recwin < (long)segsiz) {
21519 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
21520 recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
21521 recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
21525 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or
21526 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is
21527 * handled in syncache.
21529 if (flags & TH_SYN)
21530 th->th_win = htons((u_short)
21531 (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
21533 /* Avoid shrinking window with window scaling. */
21534 recwin = roundup2(recwin, 1 << tp->rcv_scale);
21535 th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
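/*
 * Rounding recwin up to a multiple of (1 << rcv_scale) just above keeps
 * the scaled th_win field from truncating downward, which could
 * otherwise look to the peer like a shrinking window.
 */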
21538 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0
21539 * window. This may cause the remote transmitter to stall. This
21540 * flag tells soreceive() to disable delayed acknowledgements when
21541 * draining the buffer. This can occur if the receiver is
21542 * attempting to read more data than can be buffered prior to
21543 * transmitting on the connection.
21545 if (th->th_win == 0) {
21546 tp->t_sndzerowin++;
21547 tp->t_flags |= TF_RXWIN0SENT;
21549 tp->t_flags &= ~TF_RXWIN0SENT;
21550 tp->snd_up = tp->snd_una; /* drag it along, it's deprecated */
21551 /* Now, are we using the fsb? If so, copy the template data to the mbuf */
21552 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
21555 cpto = mtod(m, uint8_t *);
21556 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
21558 * We have just copied in: the ip/ip6 header,
21560 * <optional udphdr>
21561 * tcphdr (no options)
21563 * We need to grab the correct pointers into the mbuf
21564 * for both the tcp header, and possibly the udp header (if tunneling).
21565 * We do this by using the offset in the copy buffer and adding it
21566 * to the mbuf base pointer (cpto).
21570 ip6 = mtod(m, struct ip6_hdr *);
21574 ip = mtod(m, struct ip *);
21576 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
21577 /* If we have a udp header let's set it into the mbuf as well */
21579 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr));
21581 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
21582 if (to.to_flags & TOF_SIGNATURE) {
21584 * Calculate MD5 signature and put it into the place
21585 * determined before.
21586 * NOTE: since TCP options buffer doesn't point into
21587 * mbuf's data, calculate offset and use it.
21589 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
21590 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
21592 * Do not send segment if the calculation of MD5
21593 * digest has failed.
21600 bcopy(opt, th + 1, optlen);
21601 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
21604 * Put TCP length in extended header, and then checksum extended header and data.
21607 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */
21611 * ip6_plen does not need to be filled in now; it will be filled in later.
21615 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
21616 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
21617 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
21618 th->th_sum = htons(0);
21619 UDPSTAT_INC(udps_opackets);
21621 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
21622 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
21623 th->th_sum = in6_cksum_pseudo(ip6,
21624 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
21629 #if defined(INET6) && defined(INET)
21635 m->m_pkthdr.csum_flags = CSUM_UDP;
21636 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
21637 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
21638 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
21639 th->th_sum = htons(0);
21640 UDPSTAT_INC(udps_opackets);
21642 m->m_pkthdr.csum_flags = CSUM_TCP;
21643 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
21644 th->th_sum = in_pseudo(ip->ip_src.s_addr,
21645 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
21646 IPPROTO_TCP + len + optlen));
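/*
 * The transport checksum itself is left to the NIC (CSUM_TCP/CSUM_UDP in
 * csum_flags, with csum_data giving the offset of the checksum field);
 * th_sum/uh_sum is seeded with only the pseudo-header sum that the
 * hardware folds the payload into.
 */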
21648 /* IP version must be set here for ipv4/ipv6 checking later */
21649 KASSERT(ip->ip_v == IPVERSION,
21650 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
21654 * Enable TSO and specify the size of the segments. The TCP pseudo
21655 * header checksum is always provided. XXX: Fixme: This is currently
21656 * not the case for IPv6.
21660 * Here we must use t_maxseg and the optlen since
21661 * the optlen may include SACKs (or a DSACK).
21663 KASSERT(len > tp->t_maxseg - optlen,
21664 ("%s: len <= tso_segsz", __func__));
21665 m->m_pkthdr.csum_flags |= CSUM_TSO;
21666 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
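/*
 * tso_segsz excludes the TCP options because the NIC replicates the same
 * header (options included) on every segment it carves from this burst,
 * so each generated packet still fits within t_maxseg.
 */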
21668 KASSERT(len + hdrlen == m_length(m, NULL),
21669 ("%s: mbuf chain different than expected: %d + %u != %u",
21670 __func__, len, hdrlen, m_length(m, NULL)));
21673 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
21674 hhook_run_tcp_est_out(tp, th, &to, len, tso);
21676 if ((rack->r_ctl.crte != NULL) &&
21677 (rack->rc_hw_nobuf == 0) &&
21678 tcp_bblogging_on(tp)) {
21679 rack_log_queue_level(tp, rack, len, &tv, cts);
21681 /* We're getting ready to send; log now. */
21682 if (tcp_bblogging_on(rack->rc_tp)) {
21683 union tcp_log_stackspecific log;
21685 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
21686 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
21687 if (rack->rack_no_prr)
21688 log.u_bbr.flex1 = 0;
21690 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
21691 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
21692 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
21693 log.u_bbr.flex4 = orig_len;
21694 /* Save off the early/late values */
21695 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
21696 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
21697 log.u_bbr.bw_inuse = rack_get_bw(rack);
21698 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw;
21699 log.u_bbr.flex8 = 0;
21701 if (rsm->r_flags & RACK_RWND_COLLAPSED) {
21702 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm);
21703 counter_u64_add(rack_collapsed_win_rxt, 1);
21704 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start));
21707 log.u_bbr.flex8 = 2;
21709 log.u_bbr.flex8 = 1;
21712 log.u_bbr.flex8 = 3;
21714 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
21715 log.u_bbr.flex7 = mark;
21716 log.u_bbr.flex7 <<= 8;
21717 log.u_bbr.flex7 |= pass;
21718 log.u_bbr.pkts_out = tp->t_maxseg;
21719 log.u_bbr.timeStamp = cts;
21720 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
21721 if (rsm && (rsm->r_rtr_cnt > 0)) {
21723 * When we have a retransmit we want to log the
21724 * burst at send and flight at send from before.
21726 log.u_bbr.flex5 = rsm->r_fas;
21727 log.u_bbr.bbr_substate = rsm->r_bas;
21730 * New transmits we log in flex5 the inflight again as
21731 * well as the number of segments in our send in the bbr_substate field.
21734 log.u_bbr.flex5 = log.u_bbr.inflight;
21735 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz);
21737 log.u_bbr.lt_epoch = cwnd_to_use;
21738 log.u_bbr.delivered = sendalot;
21739 log.u_bbr.rttProp = (uint64_t)rsm;
21740 log.u_bbr.pkt_epoch = __LINE__;
21742 log.u_bbr.delRate = rsm->r_flags;
21743 log.u_bbr.delRate <<= 31;
21744 log.u_bbr.delRate |= rack->r_must_retran;
21745 log.u_bbr.delRate <<= 1;
21746 log.u_bbr.delRate |= (sack_rxmit & 0x00000001);
21748 log.u_bbr.delRate = rack->r_must_retran;
21749 log.u_bbr.delRate <<= 1;
21750 log.u_bbr.delRate |= (sack_rxmit & 0x00000001);
21752 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
21753 len, &log, false, NULL, __func__, __LINE__, &tv);
21758 * Fill in IP length and desired time to live and send to IP level.
21759 * There should be a better way to handle ttl and tos; we could keep
21760 * them in the template, but need a way to checksum without them.
21763 * m->m_pkthdr.len should have been set before cksum calculation,
21764 * because in6_cksum() needs it.
21769 * we separately set hoplimit for every segment, since the
21770 * user might want to change the value via setsockopt. Also,
21771 * desired default hop limit might be changed via Neighbor Discovery.
21774 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL);
21777 * Set the packet size here for the benefit of DTrace
21778 * probes. ip6_output() will set it properly; it's supposed
21779 * to include the option header lengths as well.
21781 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
21783 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
21784 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
21786 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
21788 if (tp->t_state == TCPS_SYN_SENT)
21789 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);
21791 TCP_PROBE5(send, NULL, tp, ip6, tp, th);
21792 /* TODO: IPv6 IP6TOS_ECT bit on */
21793 error = ip6_output(m,
21794 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
21795 inp->in6p_outputopts,
21800 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0),
21803 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
21804 mtu = inp->inp_route6.ro_nh->nh_mtu;
21807 #if defined(INET) && defined(INET6)
21812 ip->ip_len = htons(m->m_pkthdr.len);
21814 if (inp->inp_vflag & INP_IPV6PROTO)
21815 ip->ip_ttl = in6_selecthlim(inp, NULL);
21817 rack->r_ctl.fsb.hoplimit = ip->ip_ttl;
21819 * If we do path MTU discovery, then we set DF on every
21820 * packet. This might not be the best thing to do according
21821 * to RFC3390 Section 2. However the tcp hostcache mitigates
21822 * the problem so it affects only the first tcp connection with a host.
21825 * NB: Don't set DF on small MTU/MSS to have a safe fallback.
21828 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
21829 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
21830 if (tp->t_port == 0 || len < V_tcp_minmss) {
21831 ip->ip_off |= htons(IP_DF);
21834 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
21837 if (tp->t_state == TCPS_SYN_SENT)
21838 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);
21840 TCP_PROBE5(send, NULL, tp, ip, tp, th);
21842 error = ip_output(m,
21843 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
21849 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0,
21851 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
21852 mtu = inp->inp_route.ro_nh->nh_mtu;
21858 lgb->tlb_errno = error;
21862 * In transmit state, time the transmission and arrange for the
21863 * retransmit. In persist state, just set snd_max.
21865 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error,
21866 rack_to_usec_ts(&tv),
21867 rsm, add_flag, s_mb, s_moff, hw_tls, segsiz);
21870 if (rack->lt_bw_up == 0) {
21871 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv);
21872 rack->r_ctl.lt_seq = tp->snd_una;
21873 rack->lt_bw_up = 1;
21874 } else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) {
21876 * Need to record what we have since we are
21877 * approaching seq wrap.
21881 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq);
21882 rack->r_ctl.lt_seq = tp->snd_una;
21883 tmark = tcp_tv_to_lusectick(&tv);
21884 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark);
21885 rack->r_ctl.lt_timemark = tmark;
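/*
 * Lifetime bandwidth accounting: fold the bytes acked since lt_seq and
 * the time elapsed since lt_timemark into lt_bw_bytes/lt_bw_time, then
 * restart the interval so the counters never have to span a
 * sequence-number wrap.
 */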
21888 rack->forced_ack = 0; /* If we send something zap the FA flag */
21889 counter_u64_add(rack_total_bytes, len);
21890 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls);
21891 if (rsm && doing_tlp) {
21892 rack->rc_last_sent_tlp_past_cumack = 0;
21893 rack->rc_last_sent_tlp_seq_valid = 1;
21894 rack->r_ctl.last_sent_tlp_seq = rsm->r_start;
21895 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start;
21897 if (rack->rc_hw_nobuf) {
21898 rack->rc_hw_nobuf = 0;
21899 rack->r_ctl.rc_agg_delayed = 0;
21902 rack->r_ctl.rc_agg_early = 0;
21904 if (rsm && (doing_tlp == 0)) {
21905 /* Set we retransmitted */
21906 rack->rc_gp_saw_rec = 1;
21908 if (cwnd_to_use > tp->snd_ssthresh) {
21909 /* Set we sent in CA */
21910 rack->rc_gp_saw_ca = 1;
21912 /* Set we sent in SS */
21913 rack->rc_gp_saw_ss = 1;
21916 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
21917 (tp->t_flags & TF_SACK_PERMIT) &&
21918 tp->rcv_numsacks > 0)
21919 tcp_clean_dsack_blocks(tp);
21920 tot_len_this_send += len;
21922 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1);
21926 idx = (len / segsiz) + 3;
21927 if (idx >= TCP_MSS_ACCT_ATIMER)
21928 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
21930 counter_u64_add(rack_out_size[idx], 1);
21933 if ((rack->rack_no_prr == 0) &&
21936 if (rack->r_ctl.rc_prr_sndcnt >= len)
21937 rack->r_ctl.rc_prr_sndcnt -= len;
21939 rack->r_ctl.rc_prr_sndcnt = 0;
21943 /* Make sure the TLP is added */
21944 add_flag |= RACK_TLP;
21946 /* If it's a resend without TLP then it must not have the flag */
21947 rsm->r_flags &= ~RACK_TLP;
21951 if ((error == 0) &&
21953 (tp->snd_una == tp->snd_max))
21954 rack->r_ctl.rc_tlp_rxt_last_time = cts;
21956 tcp_seq startseq = tp->snd_nxt;
21958 /* Track our lost count */
21959 if (rsm && (doing_tlp == 0))
21960 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start;
21962 * Advance snd_nxt over sequence space of this segment.
21965 /* We don't log or do anything with errors */
21967 if (doing_tlp == 0) {
21970 * Not a retransmission of some
21971 * sort, new data is going out so
21972 * clear our TLP count and flag.
21974 rack->rc_tlp_in_progress = 0;
21975 rack->r_ctl.rc_tlp_cnt_out = 0;
21979 * We have just sent a TLP, mark that it is true
21980 * and make sure our in progress is set so we
21981 * continue to check the count.
21983 rack->rc_tlp_in_progress = 1;
21984 rack->r_ctl.rc_tlp_cnt_out++;
21986 if (flags & (TH_SYN | TH_FIN)) {
21987 if (flags & TH_SYN)
21989 if (flags & TH_FIN) {
21991 tp->t_flags |= TF_SENTFIN;
21994 /* In the ENOBUFS case we do *not* update snd_max */
21998 tp->snd_nxt += len;
21999 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
22000 if (tp->snd_una == tp->snd_max) {
22002 * Update the time we just added data since
22003 * none was outstanding.
22005 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
22006 tp->t_acktime = ticks;
22008 tp->snd_max = tp->snd_nxt;
22009 if (rack->rc_new_rnd_needed) {
22011 * Update the rnd to start ticking; note
22012 * that from a time perspective all of
22013 * the preceding idle time is "in the round".
22015 rack->rc_new_rnd_needed = 0;
22016 rack->r_ctl.roundends = tp->snd_max;
22019 * Time this transmission if not a retransmission and
22020 * not currently timing anything.
22021 * This is only relevant in case of switching back to the base stack.
22024 if (tp->t_rtttime == 0) {
22025 tp->t_rtttime = ticks;
22026 tp->t_rtseq = startseq;
22027 KMOD_TCPSTAT_INC(tcps_segstimed);
22030 ((tp->t_flags & TF_GPUTINPROG) == 0))
22031 rack_start_gp_measurement(tp, rack, startseq, sb_offset);
22034 * If we are doing FO we need to update the mbuf position and subtract
22035 * what was sent; this happens when the peer sends us duplicate information and
22036 * we thus want to send a DSACK.
22038 * XXXRRS: This brings to mind a question: when we send a DSACK block, is TSO
22039 * turned off? If not then we are going to echo multiple DSACK blocks
22040 * out (with the TSO), which we should not be doing.
22042 if (rack->r_fast_output && len) {
22043 if (rack->r_ctl.fsb.left_to_send > len)
22044 rack->r_ctl.fsb.left_to_send -= len;
22046 rack->r_ctl.fsb.left_to_send = 0;
22047 if (rack->r_ctl.fsb.left_to_send < segsiz)
22048 rack->r_fast_output = 0;
22049 if (rack->r_fast_output) {
22050 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
22051 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
22052 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m);
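/*
 * Fast-output bookkeeping after a successful send: shrink left_to_send
 * by what just went out, drop out of fast-output mode once less than a
 * full segment remains, and otherwise re-anchor the cached mbuf, offset
 * and original length/trailing room at the new snd_max.
 */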
22058 rack->r_ctl.rc_agg_delayed = 0;
22061 rack->r_ctl.rc_agg_early = 0;
22062 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */
22064 * Failures do not advance the seq counter above. For the
22065 * case of ENOBUFS we will fall out and retry in 1ms with
22066 * the hpts. Everything else will just have to retransmit with the timer.
22069 * In any case, we do not want to loop around for another
22070 * send without a good reason.
22075 tp->t_softerror = error;
22076 #ifdef TCP_ACCOUNTING
22077 crtsc = get_cyclecount();
22078 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22079 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
22081 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22082 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
22089 * Pace us right away to retry in a short amount of time.
22092 if (rack->r_ctl.crte != NULL) {
22093 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF);
22094 if (tcp_bblogging_on(rack->rc_tp))
22095 rack_log_queue_level(tp, rack, len, &tv, cts);
22097 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF);
22098 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
22099 if (rack->rc_enobuf < 0x7f)
22101 if (slot < (10 * HPTS_USEC_IN_MSEC))
22102 slot = 10 * HPTS_USEC_IN_MSEC;
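/*
 * The retry delay above grows linearly with consecutive ENOBUFS returns
 * (rc_enobuf, capped at 0x7f) and is floored at 10 ms; the hpts pacing
 * timer then brings us back to try the send again.
 */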
22103 if (rack->r_ctl.crte != NULL) {
22104 counter_u64_add(rack_saw_enobuf_hw, 1);
22105 tcp_rl_log_enobuf(rack->r_ctl.crte);
22107 counter_u64_add(rack_saw_enobuf, 1);
22111 * For some reason the interface we used initially
22112 * to send segments changed to another or lowered
22113 * its MTU. If TSO was active we either got an
22114 * interface without TSO capabilities or TSO was
22115 * turned off. If we obtained mtu from ip_output()
22116 * then update it and try again.
22119 tp->t_flags &= ~TF_TSO;
22123 saved_mtu = tp->t_maxseg;
22124 tcp_mss_update(tp, -1, mtu, NULL, NULL);
22125 if (saved_mtu > tp->t_maxseg) {
22129 slot = 10 * HPTS_USEC_IN_MSEC;
22130 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
22131 #ifdef TCP_ACCOUNTING
22132 crtsc = get_cyclecount();
22133 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22134 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
22136 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22137 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
22143 counter_u64_add(rack_saw_enetunreach, 1);
22147 if (TCPS_HAVERCVDSYN(tp->t_state)) {
22148 tp->t_softerror = error;
22152 slot = 10 * HPTS_USEC_IN_MSEC;
22153 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
22154 #ifdef TCP_ACCOUNTING
22155 crtsc = get_cyclecount();
22156 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22157 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
22159 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22160 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
22167 rack->rc_enobuf = 0;
22168 if (IN_FASTRECOVERY(tp->t_flags) && rsm)
22169 rack->r_ctl.retran_during_recovery += len;
22171 KMOD_TCPSTAT_INC(tcps_sndtotal);
22174 * Data sent (as far as we can tell). If this advertises a larger
22175 * window than any other segment, then remember the size of the
22176 * advertised window. Any pending ACK has now been sent.
22178 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
22179 tp->rcv_adv = tp->rcv_nxt + recwin;
22181 tp->last_ack_sent = tp->rcv_nxt;
22182 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
22185 /* Do we need to turn off sendalot? */
22186 if (rack->r_ctl.rc_pace_max_segs &&
22187 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) {
22188 /* We hit our max. */
22190 } else if ((rack->rc_user_set_max_segs) &&
22191 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) {
22192 /* We hit the user defined max */
22196 if ((error == 0) && (flags & TH_FIN))
22197 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN);
22198 if (flags & TH_RST) {
22200 * We don't send again after sending a RST.
22205 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
22206 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) {
22208 * Get our pacing rate, if an error
22209 * occurred in sending (ENOBUF) we would
22210 * hit the else if with slot preset. Other errors return.
22213 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz);
22216 (rsm->r_flags & RACK_HAS_SYN) == 0 &&
22217 rack->use_rack_rr) {
22218 /* It's a retransmit and we use the rack cheat? */
22220 (rack->rc_always_pace == 0) ||
22221 (rack->r_rr_config == 1)) {
22223 * We have no pacing set or we
22224 * are using old-style rack or
22225 * we are overridden to use the old 1ms pacing.
22227 slot = rack->r_ctl.rc_min_to;
22230 /* We have sent; clear the flag */
22231 rack->r_ent_rec_ns = 0;
22232 if (rack->r_must_retran) {
22234 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
22235 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
22237 * We have retransmitted all.
22239 rack->r_must_retran = 0;
22240 rack->r_ctl.rc_out_at_rto = 0;
22242 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
22244 * Sending new data past the RTO point also clears the must-retransmit state.
22247 rack->r_must_retran = 0;
22248 rack->r_ctl.rc_out_at_rto = 0;
22251 rack->r_ctl.fsb.recwin = recwin;
22252 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) &&
22253 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
22255 * We hit an RTO and have now passed snd_max at the RTO;
22256 * clear all the WAS flags.
22258 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY);
22261 /* set the rack tcb into the slot N */
22262 if ((error == 0) &&
22264 ((flags & (TH_SYN|TH_FIN)) == 0) &&
22266 (tp->snd_nxt == tp->snd_max) &&
22268 (tp->rcv_numsacks == 0) &&
22269 rack->r_fsb_inited &&
22270 TCPS_HAVEESTABLISHED(tp->t_state) &&
22271 ((IN_RECOVERY(tp->t_flags)) == 0) &&
22272 (rack->r_must_retran == 0) &&
22273 ((tp->t_flags & TF_NEEDFIN) == 0) &&
22274 (len > 0) && (orig_len > 0) &&
22275 (orig_len > len) &&
22276 ((orig_len - len) >= segsiz) &&
22278 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
22279 /* We can send at least one more MSS using our fsb */
22280 rack_setup_fast_output(tp, rack, sb, len, orig_len,
22281 segsiz, pace_max_seg, hw_tls, flags);
22283 rack->r_fast_output = 0;
22284 rack_log_fsb(rack, tp, so, flags,
22285 ipoptlen, orig_len, len, error,
22286 (rsm == NULL), optlen, __LINE__, 2);
22287 } else if (sendalot) {
22291 if ((error == 0) &&
22293 ((flags & (TH_SYN|TH_FIN)) == 0) &&
22296 (tp->rcv_numsacks == 0) &&
22297 (tp->snd_nxt == tp->snd_max) &&
22298 (rack->r_must_retran == 0) &&
22299 rack->r_fsb_inited &&
22300 TCPS_HAVEESTABLISHED(tp->t_state) &&
22301 ((IN_RECOVERY(tp->t_flags)) == 0) &&
22302 ((tp->t_flags & TF_NEEDFIN) == 0) &&
22303 (len > 0) && (orig_len > 0) &&
22304 (orig_len > len) &&
22305 ((orig_len - len) >= segsiz) &&
22307 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
22308 /* we can use fast_output for more */
22309 rack_setup_fast_output(tp, rack, sb, len, orig_len,
22310 segsiz, pace_max_seg, hw_tls, flags);
22311 if (rack->r_fast_output) {
22313 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
22323 /* Assure when we leave that snd_nxt will point to top */
22325 if (SEQ_GT(tp->snd_max, tp->snd_nxt))
22326 tp->snd_nxt = tp->snd_max;
22327 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0);
22328 #ifdef TCP_ACCOUNTING
22329 crtsc = get_cyclecount() - ts_val;
22330 if (tot_len_this_send) {
22331 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22332 tp->tcp_cnt_counters[SND_OUT_DATA]++;
22334 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22335 tp->tcp_proc_time[SND_OUT_DATA] += crtsc;
22337 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22338 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz);
22341 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22342 tp->tcp_cnt_counters[SND_OUT_ACK]++;
22344 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22345 tp->tcp_proc_time[SND_OUT_ACK] += crtsc;
22350 if (error == ENOBUFS)
22356 rack_update_seg(struct tcp_rack *rack)
22360 orig_val = rack->r_ctl.rc_pace_max_segs;
22361 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
22362 if (orig_val != rack->r_ctl.rc_pace_max_segs)
22363 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0);
22367 rack_mtu_change(struct tcpcb *tp)
22370 * The MSS may have changed
22372 struct tcp_rack *rack;
22373 struct rack_sendmap *rsm;
22375 rack = (struct tcp_rack *)tp->t_fb_ptr;
22376 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) {
22378 * The MTU has changed; we need to resend everything
22379 * since all we have sent is lost. We first fix
22380 * up the mtu though.
22382 rack_set_pace_segments(tp, rack, __LINE__, NULL);
22383 /* We treat this like a full retransmit timeout without the cwnd adjustment */
22384 rack_remxt_tmr(tp);
22385 rack->r_fast_output = 0;
22386 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp,
22387 rack->r_ctl.rc_sacked);
22388 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
22389 rack->r_must_retran = 1;
22390 /* Mark all inflight as needing to be rxt'd */
22391 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
22392 rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG);
22395 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
22396 /* We don't use snd_nxt to retransmit */
22397 tp->snd_nxt = tp->snd_max;
22401 rack_set_dgp(struct tcp_rack *rack)
22403 /* pace_always=1 */
22404 if (rack->rc_always_pace == 0) {
22405 if (tcp_can_enable_pacing() == 0)
22409 rack->rc_always_pace = 1;
22410 rack->use_fixed_rate = 0;
22411 if (rack->gp_ready)
22412 rack_set_cc_pacing(rack);
22413 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
22414 rack->rack_attempt_hdwr_pace = 0;
22416 rack->full_size_rxt = 1;
22417 rack->shape_rxt_to_pacing_min = 0;
22419 rack->r_use_cmp_ack = 1;
22420 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) &&
22421 rack->r_use_cmp_ack)
22422 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
22424 rack->rack_enable_scwnd = 1;
22426 rack->rc_gp_dyn_mul = 1;
22428 rack->r_ctl.rack_per_of_gp_ca = 100;
22430 rack->r_rr_config = 3;
22432 rack->r_ctl.rc_no_push_at_mrtt = 2;
22434 if (rack->r_cwnd_was_clamped == 0) {
22435 rack->rc_pace_to_cwnd = 1;
22437 rack->rc_pace_to_cwnd = 0;
22438 /* Reset all multipliers to 100.0 so that just the measured bw is used */
22439 rack->r_ctl.rack_per_of_gp_ss = 100;
22440 rack->r_ctl.rack_per_of_gp_ca = 100;
22442 rack->rc_pace_fill_if_rttin_range = 0;
22443 rack->rtt_limit_mul = 0;
22445 rack->rack_no_prr = 1;
22447 rack->r_limit_scw = 1;
22449 rack->r_ctl.rack_per_of_gp_rec = 90;
22450 rack_client_buffer_level_set(rack);
22457 rack_set_profile(struct tcp_rack *rack, int prof)
22462 * Profile 1 is "standard" DGP. It ignores
22463 * client buffer level.
22465 rack->r_ctl.rc_dgp_bl_agg = DGP_LEVEL0;
22466 err = rack_set_dgp(rack);
22469 } else if (prof == 2) {
22471 * Profile 2 is DGP. Less aggressive with
22472 * respect to client buffer level.
22474 rack->r_ctl.rc_dgp_bl_agg = DGP_LEVEL1;
22475 err = rack_set_dgp(rack);
22478 } else if (prof == 3) {
22480 * Profile 3 is DGP. Even less aggressive with
22481 * respect to client buffer level.
22483 rack->r_ctl.rc_dgp_bl_agg = DGP_LEVEL2;
22484 err = rack_set_dgp(rack);
22487 } else if (prof == 4) {
22489 * Profile 4 is DGP with the most responsiveness
22490 * to client buffer level.
22492 rack->r_ctl.rc_dgp_bl_agg = DGP_LEVEL3;
22493 err = rack_set_dgp(rack);
22496 } else if (prof == 0) {
22497 /* This changes things back to the default settings */
22499 rack->rc_hybrid_mode = 0;
22501 if (rack_fill_cw_state)
22502 rack->rc_pace_to_cwnd = 1;
22504 rack->rc_pace_to_cwnd = 0;
22505 if (rack->rc_always_pace) {
22506 tcp_decrement_paced_conn();
22507 rack_undo_cc_pacing(rack);
22508 rack->rc_always_pace = 0;
22510 if (rack_pace_every_seg && tcp_can_enable_pacing()) {
22511 rack->rc_always_pace = 1;
22512 if ((rack->gp_ready) && (rack->use_fixed_rate == 0))
22513 rack_set_cc_pacing(rack);
22515 rack->rc_always_pace = 0;
22516 if (rack_dsack_std_based & 0x1) {
22517 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */
22518 rack->rc_rack_tmr_std_based = 1;
22520 if (rack_dsack_std_based & 0x2) {
22521 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */
22522 rack->rc_rack_use_dsack = 1;
22524 if (rack_use_cmp_acks)
22525 rack->r_use_cmp_ack = 1;
22527 rack->r_use_cmp_ack = 0;
22528 if (rack_disable_prr)
22529 rack->rack_no_prr = 1;
22531 rack->rack_no_prr = 0;
22532 if (rack_gp_no_rec_chg)
22533 rack->rc_gp_no_rec_chg = 1;
22535 rack->rc_gp_no_rec_chg = 0;
22536 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) {
22537 rack->r_mbuf_queue = 1;
22538 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state))
22539 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
22540 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
22542 rack->r_mbuf_queue = 0;
22543 rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
22545 if (rack_enable_shared_cwnd)
22546 rack->rack_enable_scwnd = 1;
22548 rack->rack_enable_scwnd = 0;
22549 if (rack_do_dyn_mul) {
22550 /* When dynamic adjustment is on CA needs to start at 100% */
22551 rack->rc_gp_dyn_mul = 1;
22552 if (rack_do_dyn_mul >= 100)
22553 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
22555 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
22556 rack->rc_gp_dyn_mul = 0;
22558 rack->r_rr_config = 0;
22559 rack->r_ctl.rc_no_push_at_mrtt = 0;
22560 rack->rc_pace_to_cwnd = 0;
22561 rack->rc_pace_fill_if_rttin_range = 0;
22562 rack->rtt_limit_mul = 0;
22564 if (rack_enable_hw_pacing)
22565 rack->rack_hdw_pace_ena = 1;
22567 rack->rack_hdw_pace_ena = 0;
22568 if (rack_disable_prr)
22569 rack->rack_no_prr = 1;
22571 rack->rack_no_prr = 0;
22572 if (rack_limits_scwnd)
22573 rack->r_limit_scw = 1;
22575 rack->r_limit_scw = 0;
22576 rack_init_retransmit_value(rack, rack_rxt_controls);
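/*
 * For illustration only: a userland sender would typically pick one of the
 * profiles handled above via the TCP_RACK_PROFILE socket option once the
 * connection is on this stack (sketch; "fd" is an assumed connected TCP
 * socket and <netinet/tcp.h>, <sys/socket.h> and <err.h> are included):
 *
 *	int prof = 1;	// 1-4 select DGP variants, 0 restores the defaults
 *
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_RACK_PROFILE,
 *	    &prof, sizeof(prof)) == -1)
 *		warn("TCP_RACK_PROFILE");
 */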
22583 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval)
22585 struct deferred_opt_list *dol;
22587 dol = malloc(sizeof(struct deferred_opt_list),
22588 M_TCPFSB, M_NOWAIT|M_ZERO);
22591 * No memory available -- fail out.
22595 dol->optname = sopt_name;
22596 dol->optval = loptval;
22597 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next);
22602 process_hybrid_pacing(struct tcp_rack *rack, struct tcp_hybrid_req *hybrid)
22604 #ifdef TCP_REQUEST_TRK
22605 struct http_sendfile_track *sft;
22613 * If BB logging is not on we need to look at the DTL flag.
22614 * If it's already on, then those reasons override the DTL input.
22615 * We do this for any request: you can turn DTL on, but it does
22616 * not turn off, at least not from hybrid pacing requests.
22618 if (tcp_bblogging_on(rack->rc_tp) == 0) {
22619 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_DTL) {
22620 /* Turn on BB point logging */
22621 tcp_set_bblog_state(rack->rc_tp, TCP_LOG_VIA_BBPOINTS,
22622 TCP_BBPOINT_REQ_LEVEL_LOGGING);
22625 /* Make sure no fixed rate is on */
22626 rack->use_fixed_rate = 0;
22627 rack->r_ctl.rc_fixed_pacing_rate_rec = 0;
22628 rack->r_ctl.rc_fixed_pacing_rate_ca = 0;
22629 rack->r_ctl.rc_fixed_pacing_rate_ss = 0;
22630 /* Now allocate or find our entry that will have these settings */
22631 sft = tcp_http_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0);
22633 rack->rc_tp->tcp_hybrid_error++;
22634 /* no space, where would it have gone? */
22635 seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc;
22636 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_ROOM, __LINE__, 0);
22639 /* The seq will be snd_una + everything in the buffer */
22640 seq = sft->start_seq;
22641 if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) {
22642 /* Disabling hybrid pacing */
22643 if (rack->rc_hybrid_mode) {
22644 rack_set_profile(rack, 0);
22645 rack->rc_tp->tcp_hybrid_stop++;
22647 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_TURNED_OFF, __LINE__, 0);
22650 if (rack->dgp_on == 0) {
22652 * If we have not yet turned DGP on, do so
22653 * now setting pure DGP mode, no buffer level
22656 if ((err = rack_set_profile(rack, 1)) != 0){
22657 /* Failed to turn pacing on */
22658 rack->rc_tp->tcp_hybrid_error++;
22659 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_NO_PACING, __LINE__, 0);
22663 /* Now set in our flags */
22664 sft->hybrid_flags = hybrid->hybrid_flags;
22665 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR)
22666 sft->cspr = hybrid->cspr;
22669 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS)
22670 sft->hint_maxseg = hybrid->hint_maxseg;
22672 sft->hint_maxseg = 0;
22673 rack->rc_hybrid_mode = 1;
22674 rack->rc_tp->tcp_hybrid_start++;
22675 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_RULES_SET, __LINE__,0);
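/*
 * For illustration only: a sender wanting hybrid pacing for an upcoming
 * object would fill in a struct tcp_hybrid_req and pass it via the
 * TCP_HYBRID_PACING socket option. This is a sketch; the units of the
 * rate value and the contents of the embedded request are assumptions:
 *
 *	struct tcp_hybrid_req hr;
 *
 *	memset(&hr, 0, sizeof(hr));
 *	hr.hybrid_flags = TCP_HYBRID_PACING_ENABLE | TCP_HYBRID_PACING_CSPR;
 *	hr.cspr = 12500000;	// client-specified pacing rate (units assumed)
 *	// hr.req would describe the request (byte range etc.) being paced
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_HYBRID_PACING,
 *	    &hr, sizeof(hr)) == -1)
 *		warn("TCP_HYBRID_PACING");
 */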
22683 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name,
22684 uint32_t optval, uint64_t loptval, struct tcp_hybrid_req *hybrid)
22687 struct epoch_tracker et;
22688 struct sockopt sopt;
22689 struct cc_newreno_opts opt;
22690 struct inpcb *inp = tptoinpcb(tp);
22695 switch (sopt_name) {
22696 case TCP_RACK_SET_RXT_OPTIONS:
22697 if ((optval >= 0) && (optval <= 2)) {
22698 rack_init_retransmit_value(rack, optval);
22701 * You must send in 0, 1 or 2; all else is
22707 case TCP_RACK_DSACK_OPT:
22708 RACK_OPTS_INC(tcp_rack_dsack_opt);
22709 if (optval & 0x1) {
22710 rack->rc_rack_tmr_std_based = 1;
22712 rack->rc_rack_tmr_std_based = 0;
22714 if (optval & 0x2) {
22715 rack->rc_rack_use_dsack = 1;
22717 rack->rc_rack_use_dsack = 0;
22719 rack_log_dsack_event(rack, 5, __LINE__, 0, 0);
22721 case TCP_RACK_PACING_DIVISOR:
22722 RACK_OPTS_INC(tcp_rack_pacing_divisor);
22724 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor;
22726 if (optval < RL_MIN_DIVISOR)
22727 rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR;
22729 rack->r_ctl.pace_len_divisor = optval;
22732 case TCP_RACK_HI_BETA:
22733 RACK_OPTS_INC(tcp_rack_hi_beta);
22735 rack->rack_hibeta = 1;
22737 rack->rack_hibeta = 0;
22739 case TCP_RACK_PACING_BETA:
22740 RACK_OPTS_INC(tcp_rack_beta);
22741 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
22742 /* This only works for newreno. */
22746 if (rack->rc_pacing_cc_set) {
22748 * Set them into the real CC module
22749 * what's in the rack pcb are the old values
22750 * to be used on restore.
22752 sopt.sopt_dir = SOPT_SET;
22753 opt.name = CC_NEWRENO_BETA;
22755 if (CC_ALGO(tp)->ctl_output != NULL)
22756 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
22763 * Not pacing yet so set it into our local
22764 * rack pcb storage.
22766 rack->r_ctl.rc_saved_beta.beta = optval;
22769 case TCP_RACK_TIMER_SLOP:
22770 RACK_OPTS_INC(tcp_rack_timer_slop);
22771 rack->r_ctl.timer_slop = optval;
22772 if (rack->rc_tp->t_srtt) {
22774 * If we have an SRTT let's update t_rxtcur
22775 * to have the new slop.
22777 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
22778 rack_rto_min, rack_rto_max,
22779 rack->r_ctl.timer_slop);
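/*
 * Rough numeric sketch of the update above (values assumed; the exact
 * arithmetic lives in RACK_REXMTVAL/RACK_TCPT_RANGESET): with srtt = 50ms
 * and rttvar = 10ms the base retransmit value is on the order of
 * srtt + 4 * rttvar = 90ms, the new timer_slop is folded in, and the
 * result is clamped to the [rack_rto_min, rack_rto_max] range.
 */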
22782 case TCP_RACK_PACING_BETA_ECN:
22783 RACK_OPTS_INC(tcp_rack_beta_ecn);
22784 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
22785 /* This only works for newreno. */
22789 if (rack->rc_pacing_cc_set) {
22791 * Set them into the real CC module
22792 * what's in the rack pcb are the old values
22793 * to be used on restore.
22795 sopt.sopt_dir = SOPT_SET;
22796 opt.name = CC_NEWRENO_BETA_ECN;
22798 if (CC_ALGO(tp)->ctl_output != NULL)
22799 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
22804 * Not pacing yet so set it into our local
22805 * rack pcb storage.
22807 rack->r_ctl.rc_saved_beta.beta_ecn = optval;
22808 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED;
22811 case TCP_DEFER_OPTIONS:
22812 RACK_OPTS_INC(tcp_defer_opt);
22814 if (rack->gp_ready) {
22819 rack->defer_options = 1;
22821 rack->defer_options = 0;
22823 case TCP_RACK_MEASURE_CNT:
22824 RACK_OPTS_INC(tcp_rack_measure_cnt);
22825 if (optval && (optval <= 0xff)) {
22826 rack->r_ctl.req_measurements = optval;
22830 case TCP_REC_ABC_VAL:
22831 RACK_OPTS_INC(tcp_rec_abc_val);
22833 rack->r_use_labc_for_rec = 1;
22835 rack->r_use_labc_for_rec = 0;
22837 case TCP_RACK_ABC_VAL:
22838 RACK_OPTS_INC(tcp_rack_abc_val);
22839 if ((optval > 0) && (optval < 255))
22840 rack->rc_labc = optval;
22844 case TCP_HDWR_UP_ONLY:
22845 RACK_OPTS_INC(tcp_pacing_up_only);
22847 rack->r_up_only = 1;
22849 rack->r_up_only = 0;
22851 case TCP_PACING_RATE_CAP:
22852 RACK_OPTS_INC(tcp_pacing_rate_cap);
22853 rack->r_ctl.bw_rate_cap = loptval;
22855 case TCP_HYBRID_PACING:
22856 if (hybrid == NULL) {
22860 error = process_hybrid_pacing(rack, hybrid);
22862 case TCP_RACK_PROFILE:
22863 RACK_OPTS_INC(tcp_profile);
22864 error = rack_set_profile(rack, optval);
22866 case TCP_USE_CMP_ACKS:
22867 RACK_OPTS_INC(tcp_use_cmp_acks);
22868 if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) {
22869 /* You can't turn it off once it's on! */
22871 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) {
22872 rack->r_use_cmp_ack = 1;
22873 rack->r_mbuf_queue = 1;
22874 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
22876 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
22877 inp->inp_flags2 |= INP_MBUF_ACKCMP;
22879 case TCP_SHARED_CWND_TIME_LIMIT:
22880 RACK_OPTS_INC(tcp_lscwnd);
22882 rack->r_limit_scw = 1;
22884 rack->r_limit_scw = 0;
22886 case TCP_RACK_DGP_IN_REC:
22887 RACK_OPTS_INC(tcp_dgp_in_rec);
22889 rack->r_ctl.full_dgp_in_rec = 1;
22891 rack->r_ctl.full_dgp_in_rec = 0;
22893 case TCP_RXT_CLAMP:
22894 RACK_OPTS_INC(tcp_rxt_clamp);
22895 rack_translate_clamp_value(rack, optval);
22897 case TCP_RACK_PACE_TO_FILL:
22898 RACK_OPTS_INC(tcp_fillcw);
22900 rack->rc_pace_to_cwnd = 0;
22902 rack->rc_pace_to_cwnd = 1;
22904 rack->r_fill_less_agg = 1;
22906 if ((optval >= rack_gp_rtt_maxmul) &&
22907 rack_gp_rtt_maxmul &&
22909 rack->rc_pace_fill_if_rttin_range = 1;
22910 rack->rtt_limit_mul = optval;
22912 rack->rc_pace_fill_if_rttin_range = 0;
22913 rack->rtt_limit_mul = 0;
22916 case TCP_RACK_NO_PUSH_AT_MAX:
22917 RACK_OPTS_INC(tcp_npush);
22919 rack->r_ctl.rc_no_push_at_mrtt = 0;
22920 else if (optval < 0xff)
22921 rack->r_ctl.rc_no_push_at_mrtt = optval;
22925 case TCP_SHARED_CWND_ENABLE:
22926 RACK_OPTS_INC(tcp_rack_scwnd);
22928 rack->rack_enable_scwnd = 0;
22930 rack->rack_enable_scwnd = 1;
22932 case TCP_RACK_MBUF_QUEUE:
22933 /* Now do we use the LRO mbuf-queue feature */
22934 RACK_OPTS_INC(tcp_rack_mbufq);
22935 if (optval || rack->r_use_cmp_ack)
22936 rack->r_mbuf_queue = 1;
22938 rack->r_mbuf_queue = 0;
22939 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
22940 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
22942 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
22944 case TCP_RACK_NONRXT_CFG_RATE:
22945 RACK_OPTS_INC(tcp_rack_cfg_rate);
22947 rack->rack_rec_nonrxt_use_cr = 0;
22949 rack->rack_rec_nonrxt_use_cr = 1;
22952 RACK_OPTS_INC(tcp_rack_noprr);
22954 rack->rack_no_prr = 0;
22955 else if (optval == 1)
22956 rack->rack_no_prr = 1;
22957 else if (optval == 2)
22958 rack->no_prr_addback = 1;
22962 case TCP_TIMELY_DYN_ADJ:
22963 RACK_OPTS_INC(tcp_timely_dyn);
22965 rack->rc_gp_dyn_mul = 0;
22967 rack->rc_gp_dyn_mul = 1;
22968 if (optval >= 100) {
22970 * If the user sets something 100 or more
22971 * it's the gp_ca value.
22973 rack->r_ctl.rack_per_of_gp_ca = optval;
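/*
 * For example (interpretation assumed from the naming): an optval of 125
 * here leaves dynamic adjustment enabled and starts the congestion-
 * avoidance goodput multiplier at 125, i.e. pacing at roughly 125% of the
 * measured goodput rather than the default 100%.
 */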
22977 case TCP_RACK_DO_DETECTION:
22978 RACK_OPTS_INC(tcp_rack_do_detection);
22980 rack->do_detection = 0;
22982 rack->do_detection = 1;
22984 case TCP_RACK_TLP_USE:
22985 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) {
22989 RACK_OPTS_INC(tcp_tlp_use);
22990 rack->rack_tlp_threshold_use = optval;
22992 case TCP_RACK_TLP_REDUCE:
22993 /* RACK TLP cwnd reduction (bool) */
22994 RACK_OPTS_INC(tcp_rack_tlp_reduce);
22995 rack->r_ctl.rc_tlp_cwnd_reduce = optval;
22997 /* Pacing related ones */
22998 case TCP_RACK_PACE_ALWAYS:
23000 * Zero is the old rack method; 1 is the new
23001 * method using a pacing rate.
23003 RACK_OPTS_INC(tcp_rack_pace_always);
23005 if (rack->rc_always_pace) {
23008 } else if (tcp_can_enable_pacing()) {
23009 rack->rc_always_pace = 1;
23010 if ((rack->gp_ready) && (rack->use_fixed_rate == 0))
23011 rack_set_cc_pacing(rack);
23018 if (rack->rc_always_pace) {
23019 tcp_decrement_paced_conn();
23020 rack->rc_always_pace = 0;
23021 rack_undo_cc_pacing(rack);
23024 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
23025 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
23027 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
23028 /* A rate may be set (irate or other); if so, set the seg size */
23029 rack_update_seg(rack);
23031 case TCP_BBR_RACK_INIT_RATE:
23032 RACK_OPTS_INC(tcp_initial_rate);
23034 /* Change from kbits per second to bytes per second */
23037 rack->r_ctl.init_rate = val;
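/*
 * Worked example of the conversion noted above: an optval of 10000
 * (10,000 kbits/sec) becomes 10000 * 1000 / 8 = 1,250,000 bytes per
 * second in init_rate.
 */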
23038 if (rack->rc_init_win != rack_default_init_window) {
23042 * Options don't always get applied
23043 * in the order you think. So in order
23044 * to ensure we update the cwnd we need
23045 * to check and see if we are still
23046 * where we should raise the cwnd.
23048 win = rc_init_window(rack);
23049 if (SEQ_GT(tp->snd_max, tp->iss))
23050 snt = tp->snd_max - tp->iss;
23054 (tp->snd_cwnd < win))
23055 tp->snd_cwnd = win;
23057 if (rack->rc_always_pace)
23058 rack_update_seg(rack);
23060 case TCP_BBR_IWINTSO:
23061 RACK_OPTS_INC(tcp_initial_win);
23062 if (optval && (optval <= 0xff)) {
23065 rack->rc_init_win = optval;
23066 win = rc_init_window(rack);
23067 if (SEQ_GT(tp->snd_max, tp->iss))
23068 snt = tp->snd_max - tp->iss;
23073 rack->r_ctl.init_rate)) {
23075 * We are not past the initial window
23076 * and we have some bases for pacing,
23077 * so we need to possibly adjust up
23078 * the cwnd. Note that even if we don't set
23079 * the cwnd, it's still ok to raise the rc_init_win
23080 * which can be used coming out of idle when we
23081 * would have a rate.
23083 if (tp->snd_cwnd < win)
23084 tp->snd_cwnd = win;
23086 if (rack->rc_always_pace)
23087 rack_update_seg(rack);
23091 case TCP_RACK_FORCE_MSEG:
23092 RACK_OPTS_INC(tcp_rack_force_max_seg);
23094 rack->rc_force_max_seg = 1;
23096 rack->rc_force_max_seg = 0;
23098 case TCP_RACK_PACE_MIN_SEG:
23099 RACK_OPTS_INC(tcp_rack_min_seg);
23100 rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval);
23101 rack_set_pace_segments(tp, rack, __LINE__, NULL);
23103 case TCP_RACK_PACE_MAX_SEG:
23104 /* Max segments size in a pace in bytes */
23105 RACK_OPTS_INC(tcp_rack_max_seg);
23106 rack->rc_user_set_max_segs = optval;
23107 rack_set_pace_segments(tp, rack, __LINE__, NULL);
23109 case TCP_RACK_PACE_RATE_REC:
23110 /* Set the fixed pacing rate in bytes per second for recovery */
23111 RACK_OPTS_INC(tcp_rack_pace_rate_rec);
23112 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
23113 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
23114 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
23115 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
23116 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
23117 rack->use_fixed_rate = 1;
23118 if (rack->rc_always_pace && rack->gp_ready && rack->rack_hibeta)
23119 rack_set_cc_pacing(rack);
23120 rack_log_pacing_delay_calc(rack,
23121 rack->r_ctl.rc_fixed_pacing_rate_ss,
23122 rack->r_ctl.rc_fixed_pacing_rate_ca,
23123 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
23127 case TCP_RACK_PACE_RATE_SS:
23128 /* Set the fixed pacing rate in bytes per second for slow start */
23129 RACK_OPTS_INC(tcp_rack_pace_rate_ss);
23130 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
23131 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
23132 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
23133 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
23134 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
23135 rack->use_fixed_rate = 1;
23136 if (rack->rc_always_pace && rack->gp_ready && rack->rack_hibeta)
23137 rack_set_cc_pacing(rack);
23138 rack_log_pacing_delay_calc(rack,
23139 rack->r_ctl.rc_fixed_pacing_rate_ss,
23140 rack->r_ctl.rc_fixed_pacing_rate_ca,
23141 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
23142 __LINE__, NULL, 0);
23145 case TCP_RACK_PACE_RATE_CA:
23146 /* Set the fixed pacing rate in bytes per second for congestion avoidance */
23147 RACK_OPTS_INC(tcp_rack_pace_rate_ca);
23148 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
23149 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
23150 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
23151 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
23152 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
23153 rack->use_fixed_rate = 1;
23154 if (rack->rc_always_pace && rack->gp_ready && rack->rack_hibeta)
23155 rack_set_cc_pacing(rack);
23156 rack_log_pacing_delay_calc(rack,
23157 rack->r_ctl.rc_fixed_pacing_rate_ss,
23158 rack->r_ctl.rc_fixed_pacing_rate_ca,
23159 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
23160 __LINE__, NULL, 0);
23162 case TCP_RACK_GP_INCREASE_REC:
23163 RACK_OPTS_INC(tcp_gp_inc_rec);
23164 rack->r_ctl.rack_per_of_gp_rec = optval;
23165 rack_log_pacing_delay_calc(rack,
23166 rack->r_ctl.rack_per_of_gp_ss,
23167 rack->r_ctl.rack_per_of_gp_ca,
23168 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
23169 __LINE__, NULL, 0);
23171 case TCP_RACK_GP_INCREASE_CA:
23172 RACK_OPTS_INC(tcp_gp_inc_ca);
23176 * We don't allow any reduction
23182 rack->r_ctl.rack_per_of_gp_ca = ca;
23183 rack_log_pacing_delay_calc(rack,
23184 rack->r_ctl.rack_per_of_gp_ss,
23185 rack->r_ctl.rack_per_of_gp_ca,
23186 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
23187 __LINE__, NULL, 0);
23189 case TCP_RACK_GP_INCREASE_SS:
23190 RACK_OPTS_INC(tcp_gp_inc_ss);
23194 * We don't allow any reduction
23200 rack->r_ctl.rack_per_of_gp_ss = ss;
23201 rack_log_pacing_delay_calc(rack,
23202 rack->r_ctl.rack_per_of_gp_ss,
23203 rack->r_ctl.rack_per_of_gp_ca,
23204 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
23205 __LINE__, NULL, 0);
23207 case TCP_RACK_RR_CONF:
23208 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate);
23209 if (optval && optval <= 3)
23210 rack->r_rr_config = optval;
23212 rack->r_rr_config = 0;
23214 case TCP_PACING_DND: /* URL:dnd */
23216 rack->rc_pace_dnd = 1;
23218 rack->rc_pace_dnd = 0;
23220 case TCP_HDWR_RATE_CAP:
23221 RACK_OPTS_INC(tcp_hdwr_rate_cap);
23223 if (rack->r_rack_hw_rate_caps == 0)
23224 rack->r_rack_hw_rate_caps = 1;
23228 rack->r_rack_hw_rate_caps = 0;
23231 case TCP_RACK_SPLIT_LIMIT:
23232 RACK_OPTS_INC(tcp_split_limit);
23233 rack->r_ctl.rc_split_limit = optval;
23235 case TCP_BBR_HDWR_PACE:
23236 RACK_OPTS_INC(tcp_hdwr_pacing);
23238 if (rack->rack_hdrw_pacing == 0) {
23239 rack->rack_hdw_pace_ena = 1;
23240 rack->rack_attempt_hdwr_pace = 0;
23244 rack->rack_hdw_pace_ena = 0;
23246 if (rack->r_ctl.crte != NULL) {
23247 rack->rack_hdrw_pacing = 0;
23248 rack->rack_attempt_hdwr_pace = 0;
23249 tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
23250 rack->r_ctl.crte = NULL;
23255 /* End Pacing related ones */
23256 case TCP_RACK_PRR_SENDALOT:
23257 /* Allow PRR to send more than one seg */
23258 RACK_OPTS_INC(tcp_rack_prr_sendalot);
23259 rack->r_ctl.rc_prr_sendalot = optval;
23261 case TCP_RACK_MIN_TO:
23262 /* Minimum time between rack t-o's in ms */
23263 RACK_OPTS_INC(tcp_rack_min_to);
23264 rack->r_ctl.rc_min_to = optval;
23266 case TCP_RACK_EARLY_SEG:
23267 /* If early recovery max segments */
23268 RACK_OPTS_INC(tcp_rack_early_seg);
23269 rack->r_ctl.rc_early_recovery_segs = optval;
23271 case TCP_RACK_ENABLE_HYSTART:
23274 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED;
23275 if (rack_do_hystart > RACK_HYSTART_ON)
23276 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND;
23277 if (rack_do_hystart > RACK_HYSTART_ON_W_SC)
23278 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH;
23280 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH);
23284 case TCP_RACK_REORD_THRESH:
23285 /* RACK reorder threshold (shift amount) */
23286 RACK_OPTS_INC(tcp_rack_reord_thresh);
23287 if ((optval > 0) && (optval < 31))
23288 rack->r_ctl.rc_reorder_shift = optval;
23292 case TCP_RACK_REORD_FADE:
23293 /* Does reordering fade after ms time */
23294 RACK_OPTS_INC(tcp_rack_reord_fade);
23295 rack->r_ctl.rc_reorder_fade = optval;
23297 case TCP_RACK_TLP_THRESH:
23298 /* RACK TLP threshold i.e. srtt+(srtt/N) */
23299 RACK_OPTS_INC(tcp_rack_tlp_thresh);
23301 rack->r_ctl.rc_tlp_threshold = optval;
23305 case TCP_BBR_USE_RACK_RR:
23306 RACK_OPTS_INC(tcp_rack_rr);
23308 rack->use_rack_rr = 1;
23310 rack->use_rack_rr = 0;
23312 case TCP_RACK_PKT_DELAY:
23313 /* RACK added ms i.e. rack-rtt + reord + N */
23314 RACK_OPTS_INC(tcp_rack_pkt_delay);
23315 rack->r_ctl.rc_pkt_delay = optval;
23318 RACK_OPTS_INC(tcp_rack_delayed_ack);
23320 tp->t_delayed_ack = 0;
23322 tp->t_delayed_ack = 1;
23323 if (tp->t_flags & TF_DELACK) {
23324 tp->t_flags &= ~TF_DELACK;
23325 tp->t_flags |= TF_ACKNOW;
23326 NET_EPOCH_ENTER(et);
23328 NET_EPOCH_EXIT(et);
23332 case TCP_BBR_RACK_RTT_USE:
23333 RACK_OPTS_INC(tcp_rack_rtt_use);
23334 if ((optval != USE_RTT_HIGH) &&
23335 (optval != USE_RTT_LOW) &&
23336 (optval != USE_RTT_AVG))
23339 rack->r_ctl.rc_rate_sample_method = optval;
23341 case TCP_DATA_AFTER_CLOSE:
23342 RACK_OPTS_INC(tcp_data_after_close);
23344 rack->rc_allow_data_af_clo = 1;
23346 rack->rc_allow_data_af_clo = 0;
23351 tcp_log_socket_option(tp, sopt_name, optval, error);
23357 rack_apply_deferred_options(struct tcp_rack *rack)
23359 struct deferred_opt_list *dol, *sdol;
23362 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) {
23363 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
23364 /* Disadvantage of deferral is you lose the error return */
23365 s_optval = (uint32_t)dol->optval;
23366 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL);
23367 free(dol, M_TCPDO);
23372 rack_hw_tls_change(struct tcpcb *tp, int chg)
23374 /* Update HW tls state */
23375 struct tcp_rack *rack;
23377 rack = (struct tcp_rack *)tp->t_fb_ptr;
23379 rack->r_ctl.fsb.hw_tls = 1;
23381 rack->r_ctl.fsb.hw_tls = 0;
23385 rack_pru_options(struct tcpcb *tp, int flags)
23387 if (flags & PRUS_OOB)
23388 return (EOPNOTSUPP);
23393 rack_wake_check(struct tcpcb *tp)
23395 struct tcp_rack *rack;
23399 rack = (struct tcp_rack *)tp->t_fb_ptr;
23400 if (rack->r_ctl.rc_hpts_flags) {
23401 cts = tcp_get_usecs(&tv);
23402 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT){
23404 * Pacing timer is up, check if we are ready.
23406 if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to))
23408 } else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) {
23410 * A timer is up, check if we are ready.
23412 if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp))
23419 static struct tcp_function_block __tcp_rack = {
23420 .tfb_tcp_block_name = __XSTRING(STACKNAME),
23421 .tfb_tcp_output = rack_output,
23422 .tfb_do_queued_segments = ctf_do_queued_segments,
23423 .tfb_do_segment_nounlock = rack_do_segment_nounlock,
23424 .tfb_tcp_do_segment = rack_do_segment,
23425 .tfb_tcp_ctloutput = rack_ctloutput,
23426 .tfb_tcp_fb_init = rack_init,
23427 .tfb_tcp_fb_fini = rack_fini,
23428 .tfb_tcp_timer_stop_all = rack_stopall,
23429 .tfb_tcp_rexmit_tmr = rack_remxt_tmr,
23430 .tfb_tcp_handoff_ok = rack_handoff_ok,
23431 .tfb_tcp_mtu_chg = rack_mtu_change,
23432 .tfb_pru_options = rack_pru_options,
23433 .tfb_hwtls_change = rack_hw_tls_change,
23434 .tfb_chg_query = rack_chg_query,
23435 .tfb_switch_failed = rack_switch_failed,
23436 .tfb_early_wake_check = rack_wake_check,
23437 .tfb_compute_pipe = rack_compute_pipe,
23438 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP,
23442 * rack_ctloutput() must drop the inpcb lock before performing copyin on
23443 * socket option arguments. When it re-acquires the lock after the copy, it
23444 * has to revalidate that the connection is still valid for the socket
23448 rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt)
23450 struct inpcb *inp = tptoinpcb(tp);
23452 struct ip6_hdr *ip6;
23453 int32_t mask, tclass;
23458 struct tcp_rack *rack;
23459 struct tcp_hybrid_req hybrid;
23461 int32_t error = 0, optval;
23463 rack = (struct tcp_rack *)tp->t_fb_ptr;
23464 if (rack == NULL) {
23469 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
23472 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
23475 switch (sopt->sopt_level) {
23478 MPASS(inp->inp_vflag & INP_IPV6PROTO);
23479 switch (sopt->sopt_name) {
23480 case IPV6_USE_MIN_MTU:
23481 tcp6_use_min_mtu(tp);
23485 * The DSCP codepoint has changed, update the fsb
23486 * by overwriting any previous traffic class.
23488 if (inp->in6p_outputopts) {
23490 tclass = inp->in6p_outputopts->ip6po_tclass;
23491 ip6->ip6_flow &= htonl((~mask) << 20);
23492 ip6->ip6_flow |= htonl((tclass & mask) << 20);
23501 switch (sopt->sopt_name) {
23504 * The DSCP codepoint has changed, update the fsb.
23506 ip->ip_tos = rack->rc_inp->inp_ip_tos;
23510 * The TTL has changed, update the fsb.
23512 ip->ip_ttl = rack->rc_inp->inp_ip_ttl;
23520 switch (sopt->sopt_name) {
23521 case SO_PEERPRIO: /* SC-URL:bs */
23522 /* Already read in and sanity checked in sosetopt(). */
23523 if (inp->inp_socket) {
23524 rack->client_bufferlvl = inp->inp_socket->so_peerprio;
23525 rack_client_buffer_level_set(rack);
23533 switch (sopt->sopt_name) {
23534 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */
23535 /* Pacing related ones */
23536 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */
23537 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */
23538 case TCP_BBR_IWINTSO: /* URL:tso_iwin */
23539 case TCP_RACK_PACE_MIN_SEG: /* URL:pace_min_seg */
23540 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */
23541 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */
23542 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */
23543 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/
23544 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */
23545 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */
23546 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */
23547 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */
23548 case TCP_RACK_RR_CONF: /* URL:rrr_conf */
23549 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */
23550 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */
23551 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */
23552 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */
23553 case TCP_RACK_PACING_BETA: /* URL:pacing_beta */
23554 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */
23555 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */
23556 case TCP_RACK_DGP_IN_REC: /* URL:dgpinrec */
23557 /* End pacing related */
23558 case TCP_RXT_CLAMP: /* URL:rxtclamp */
23559 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */
23560 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */
23561 case TCP_RACK_MIN_TO: /* URL:min_to */
23562 case TCP_RACK_EARLY_SEG: /* URL:early_seg */
23563 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */
23564 case TCP_RACK_REORD_FADE: /* URL:reord_fade */
23565 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */
23566 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */
23567 case TCP_RACK_TLP_USE: /* URL:tlp_use */
23568 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */
23569 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */
23570 case TCP_RACK_DO_DETECTION: /* URL:detect */
23571 case TCP_NO_PRR: /* URL:noprr */
23572 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */
23573 case TCP_DATA_AFTER_CLOSE: /* no URL */
23574 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */
23575 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */
23576 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */
23577 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */
23578 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */
23579 case TCP_RACK_PROFILE: /* URL:profile */
23580 case TCP_HYBRID_PACING: /* URL:hybrid */
23581 case TCP_USE_CMP_ACKS: /* URL:cmpack */
23582 case TCP_RACK_ABC_VAL: /* URL:labc */
23583 case TCP_REC_ABC_VAL: /* URL:reclabc */
23584 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */
23585 case TCP_DEFER_OPTIONS: /* URL:defer */
23586 case TCP_RACK_DSACK_OPT: /* URL:dsack */
23587 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */
23588 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */
23589 case TCP_RACK_SET_RXT_OPTIONS: /* URL:rxtsz */
23590 case TCP_RACK_HI_BETA: /* URL:hibeta */
23591 case TCP_RACK_SPLIT_LIMIT: /* URL:split */
23592 case TCP_RACK_PACING_DIVISOR: /* URL:divisor */
23593 case TCP_PACING_DND: /* URL:dnd */
23597 /* Filter off all unknown options to the base stack */
23598 return (tcp_default_ctloutput(tp, sopt));
23608 if (sopt->sopt_name == TCP_PACING_RATE_CAP) {
23609 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval));
23611 * We truncate it down to 32 bits for the socket-option trace; this
23612 * means rates > 34Gbps won't show right, but that's probably ok.
23614 optval = (uint32_t)loptval;
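/*
 * The 34Gbps figure follows from the 32-bit truncation: 2^32 bytes/sec is
 * about 4.29e9 * 8 ~= 34.4 gigabits per second, so any cap above that no
 * longer fits in the traced 32-bit value (the full 64-bit cap is still
 * what gets applied).
 */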
23615 } else if (sopt->sopt_name == TCP_HYBRID_PACING) {
23616 error = sooptcopyin(sopt, &hybrid, sizeof(hybrid), sizeof(hybrid));
23618 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
23619 /* Save it in 64 bit form too */
23625 if (tp->t_fb != &__tcp_rack) {
23627 return (ENOPROTOOPT);
23629 if (rack->defer_options && (rack->gp_ready == 0) &&
23630 (sopt->sopt_name != TCP_DEFER_OPTIONS) &&
23631 (sopt->sopt_name != TCP_HYBRID_PACING) &&
23632 (sopt->sopt_name != TCP_RACK_PACING_BETA) &&
23633 (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) &&
23634 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) &&
23635 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) {
23636 /* Options are being deferred */
23637 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) {
23641 /* No memory to defer, fail */
23646 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid);
23652 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti)
23655 INP_WLOCK_ASSERT(tptoinpcb(tp));
23656 bzero(ti, sizeof(*ti));
23658 ti->tcpi_state = tp->t_state;
23659 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
23660 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
23661 if (tp->t_flags & TF_SACK_PERMIT)
23662 ti->tcpi_options |= TCPI_OPT_SACK;
23663 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
23664 ti->tcpi_options |= TCPI_OPT_WSCALE;
23665 ti->tcpi_snd_wscale = tp->snd_scale;
23666 ti->tcpi_rcv_wscale = tp->rcv_scale;
23668 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))
23669 ti->tcpi_options |= TCPI_OPT_ECN;
23670 if (tp->t_flags & TF_FASTOPEN)
23671 ti->tcpi_options |= TCPI_OPT_TFO;
23672 /* t_rcvtime is still kept in ticks */
23673 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
23674 /* Since we hold everything in precise useconds this is easy */
23675 ti->tcpi_rtt = tp->t_srtt;
23676 ti->tcpi_rttvar = tp->t_rttvar;
23677 ti->tcpi_rto = tp->t_rxtcur;
23678 ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
23679 ti->tcpi_snd_cwnd = tp->snd_cwnd;
23681 * FreeBSD-specific extension fields for tcp_info.
23683 ti->tcpi_rcv_space = tp->rcv_wnd;
23684 ti->tcpi_rcv_nxt = tp->rcv_nxt;
23685 ti->tcpi_snd_wnd = tp->snd_wnd;
23686 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. */
23687 ti->tcpi_snd_nxt = tp->snd_nxt;
23688 ti->tcpi_snd_mss = tp->t_maxseg;
23689 ti->tcpi_rcv_mss = tp->t_maxseg;
23690 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
23691 ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
23692 ti->tcpi_snd_zerowin = tp->t_sndzerowin;
23693 ti->tcpi_total_tlp = tp->t_sndtlppack;
23694 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
23695 #ifdef NETFLIX_STATS
23696 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
23699 if (tp->t_flags & TF_TOE) {
23700 ti->tcpi_options |= TCPI_OPT_TOE;
23701 tcp_offload_tcp_info(tp, ti);
23707 rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt)
23709 struct inpcb *inp = tptoinpcb(tp);
23710 struct tcp_rack *rack;
23711 int32_t error, optval;
23712 uint64_t val, loptval;
23713 struct tcp_info ti;
23715 * Because all our options are either boolean or an int, we can just
23716 * pull everything into optval and then unlock and copy. If we ever
23717 * add an option that is not an int, then this will have quite an
23718 * impact on this routine.
23721 rack = (struct tcp_rack *)tp->t_fb_ptr;
23722 if (rack == NULL) {
23726 switch (sopt->sopt_name) {
23728 /* First get the info filled */
23729 rack_fill_info(tp, &ti);
23730 /* Fix up the rtt related fields if needed */
23732 error = sooptcopyout(sopt, &ti, sizeof ti);
23735 * Beta is the congestion control value for NewReno that influences how
23736 * much of a backoff happens when loss is detected. It is normally set
23737 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value
23738 * when you exit recovery.
23740 case TCP_RACK_PACING_BETA:
23741 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
23743 else if (rack->rc_pacing_cc_set == 0)
23744 optval = rack->r_ctl.rc_saved_beta.beta;
23747 * Reach out into the CC data and report back what
23748 * I have previously set. Yes, it looks hackish, but
23749 * we don't want to report the saved values.
23751 if (tp->t_ccv.cc_data)
23752 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta;
23758 * Beta_ecn is the congestion control value for NewReno that influences how
23759 * much of a backoff happens when a ECN mark is detected. It is normally set
23760 * to 80 for 80% i.e. the cwnd is reduced by 20% of its previous value when
23761 * you exit recovery. Note that classic ECN has a beta of 50; it is only
23762 * ABE ECN that uses this "less" value, but we do too with pacing :)
23765 case TCP_RACK_PACING_BETA_ECN:
23766 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
23768 else if (rack->rc_pacing_cc_set == 0)
23769 optval = rack->r_ctl.rc_saved_beta.beta_ecn;
23772 * Reach out into the CC data and report back what
23773 * I have previously set. Yes, it looks hackish, but
23774 * we don't want to report the saved values.
23776 if (tp->t_ccv.cc_data)
23777 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn;
23782 case TCP_RACK_DSACK_OPT:
23784 if (rack->rc_rack_tmr_std_based) {
23787 if (rack->rc_rack_use_dsack) {
23791 case TCP_RACK_ENABLE_HYSTART:
23793 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) {
23794 optval = RACK_HYSTART_ON;
23795 if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND)
23796 optval = RACK_HYSTART_ON_W_SC;
23797 if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH)
23798 optval = RACK_HYSTART_ON_W_SC_C;
23800 optval = RACK_HYSTART_OFF;
23804 case TCP_RACK_DGP_IN_REC:
23805 optval = rack->r_ctl.full_dgp_in_rec;
23807 case TCP_RACK_HI_BETA:
23808 optval = rack->rack_hibeta;
23810 case TCP_RXT_CLAMP:
23811 optval = rack->r_ctl.saved_rxt_clamp_val;
23813 case TCP_DEFER_OPTIONS:
23814 optval = rack->defer_options;
23816 case TCP_RACK_MEASURE_CNT:
23817 optval = rack->r_ctl.req_measurements;
23819 case TCP_REC_ABC_VAL:
23820 optval = rack->r_use_labc_for_rec;
23822 case TCP_RACK_ABC_VAL:
23823 optval = rack->rc_labc;
23825 case TCP_HDWR_UP_ONLY:
23826 optval= rack->r_up_only;
23828 case TCP_PACING_RATE_CAP:
23829 loptval = rack->r_ctl.bw_rate_cap;
23831 case TCP_RACK_PROFILE:
23832 /* You cannot retrieve a profile; it's write only */
23835 case TCP_HYBRID_PACING:
23836 /* You cannot retrieve hybrid pacing information; it's write only */
23839 case TCP_USE_CMP_ACKS:
23840 optval = rack->r_use_cmp_ack;
23842 case TCP_RACK_PACE_TO_FILL:
23843 optval = rack->rc_pace_to_cwnd;
23844 if (optval && rack->r_fill_less_agg)
23847 case TCP_RACK_NO_PUSH_AT_MAX:
23848 optval = rack->r_ctl.rc_no_push_at_mrtt;
23850 case TCP_SHARED_CWND_ENABLE:
23851 optval = rack->rack_enable_scwnd;
23853 case TCP_RACK_NONRXT_CFG_RATE:
23854 optval = rack->rack_rec_nonrxt_use_cr;
23857 if (rack->rack_no_prr == 1)
23859 else if (rack->no_prr_addback == 1)
23864 case TCP_RACK_DO_DETECTION:
23865 optval = rack->do_detection;
23867 case TCP_RACK_MBUF_QUEUE:
23868 /* Now do we use the LRO mbuf-queue feature */
23869 optval = rack->r_mbuf_queue;
23871 case TCP_TIMELY_DYN_ADJ:
23872 optval = rack->rc_gp_dyn_mul;
23874 case TCP_BBR_IWINTSO:
23875 optval = rack->rc_init_win;
23877 case TCP_RACK_TLP_REDUCE:
23878 /* RACK TLP cwnd reduction (bool) */
23879 optval = rack->r_ctl.rc_tlp_cwnd_reduce;
23881 case TCP_BBR_RACK_INIT_RATE:
23882 val = rack->r_ctl.init_rate;
23883 /* convert to kbits per sec */
23886 optval = (uint32_t)val;
23888 case TCP_RACK_FORCE_MSEG:
23889 optval = rack->rc_force_max_seg;
23891 case TCP_RACK_PACE_MIN_SEG:
23892 optval = rack->r_ctl.rc_user_set_min_segs;
23894 case TCP_RACK_PACE_MAX_SEG:
23895 /* Max segments in a pace */
23896 optval = rack->rc_user_set_max_segs;
23898 case TCP_RACK_PACE_ALWAYS:
23899 /* Use the always pace method */
23900 optval = rack->rc_always_pace;
23902 case TCP_RACK_PRR_SENDALOT:
23903 /* Allow PRR to send more than one seg */
23904 optval = rack->r_ctl.rc_prr_sendalot;
23906 case TCP_RACK_MIN_TO:
23907 /* Minimum time between rack t-o's in ms */
23908 optval = rack->r_ctl.rc_min_to;
23910 case TCP_RACK_SPLIT_LIMIT:
23911 optval = rack->r_ctl.rc_split_limit;
23913 case TCP_RACK_EARLY_SEG:
23914 /* If early recovery max segments */
23915 optval = rack->r_ctl.rc_early_recovery_segs;
23917 case TCP_RACK_REORD_THRESH:
23918 /* RACK reorder threshold (shift amount) */
23919 optval = rack->r_ctl.rc_reorder_shift;
23921 case TCP_RACK_REORD_FADE:
23922 /* Does reordering fade after ms time */
23923 optval = rack->r_ctl.rc_reorder_fade;
23925 case TCP_BBR_USE_RACK_RR:
23926 /* Do we use the rack cheat for rxt */
23927 optval = rack->use_rack_rr;
23929 case TCP_RACK_RR_CONF:
23930 optval = rack->r_rr_config;
23932 case TCP_HDWR_RATE_CAP:
23933 optval = rack->r_rack_hw_rate_caps;
23935 case TCP_BBR_HDWR_PACE:
23936 optval = rack->rack_hdw_pace_ena;
23938 case TCP_RACK_TLP_THRESH:
23939 /* RACK TLP threshold i.e. srtt+(srtt/N) */
23940 optval = rack->r_ctl.rc_tlp_threshold;
23942 case TCP_RACK_PKT_DELAY:
23943 /* RACK added ms i.e. rack-rtt + reord + N */
23944 optval = rack->r_ctl.rc_pkt_delay;
23946 case TCP_RACK_TLP_USE:
23947 optval = rack->rack_tlp_threshold_use;
23949 case TCP_PACING_DND:
23950 optval = rack->rc_pace_dnd;
23952 case TCP_RACK_PACE_RATE_CA:
23953 optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
23955 case TCP_RACK_PACE_RATE_SS:
23956 optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
23958 case TCP_RACK_PACE_RATE_REC:
23959 optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
23961 case TCP_RACK_GP_INCREASE_SS:
23962 optval = rack->r_ctl.rack_per_of_gp_ss;
23964 case TCP_RACK_GP_INCREASE_CA:
23965 optval = rack->r_ctl.rack_per_of_gp_ca;
23967 case TCP_RACK_PACING_DIVISOR:
23968 optval = rack->r_ctl.pace_len_divisor;
23970 case TCP_BBR_RACK_RTT_USE:
23971 optval = rack->r_ctl.rc_rate_sample_method;
23974 optval = tp->t_delayed_ack;
23976 case TCP_DATA_AFTER_CLOSE:
23977 optval = rack->rc_allow_data_af_clo;
23979 case TCP_SHARED_CWND_TIME_LIMIT:
23980 optval = rack->r_limit_scw;
23982 case TCP_RACK_TIMER_SLOP:
23983 optval = rack->r_ctl.timer_slop;
23986 return (tcp_default_ctloutput(tp, sopt));
23991 if (sopt->sopt_name == TCP_PACING_RATE_CAP)
23992 error = sooptcopyout(sopt, &loptval, sizeof loptval);
23994 error = sooptcopyout(sopt, &optval, sizeof optval);
24000 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt)
24002 if (sopt->sopt_dir == SOPT_SET) {
24003 return (rack_set_sockopt(tp, sopt));
24004 } else if (sopt->sopt_dir == SOPT_GET) {
24005 return (rack_get_sockopt(tp, sopt));
24007 panic("%s: sopt_dir %d", __func__, sopt->sopt_dir);
24011 static const char *rack_stack_names[] = {
24012 __XSTRING(STACKNAME),
24014 __XSTRING(STACKALIAS),
24019 rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
24021 memset(mem, 0, size);
24026 rack_dtor(void *mem, int32_t size, void *arg)
24031 static bool rack_mod_inited = false;
24034 tcp_addrack(module_t mod, int32_t type, void *data)
24041 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
24042 sizeof(struct rack_sendmap),
24043 rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
24045 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
24046 sizeof(struct tcp_rack),
24047 rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
24049 sysctl_ctx_init(&rack_sysctl_ctx);
24050 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
24051 SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
24054 __XSTRING(STACKALIAS),
24056 __XSTRING(STACKNAME),
24058 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
24060 if (rack_sysctl_root == NULL) {
24061 printf("Failed to add sysctl node\n");
24065 rack_init_sysctls();
24066 num_stacks = nitems(rack_stack_names);
24067 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
24068 rack_stack_names, &num_stacks);
24070 printf("Failed to register %s stack name for "
24071 "%s module\n", rack_stack_names[num_stacks],
24072 __XSTRING(MODNAME));
24073 sysctl_ctx_free(&rack_sysctl_ctx);
24075 uma_zdestroy(rack_zone);
24076 uma_zdestroy(rack_pcb_zone);
24077 rack_counter_destroy();
24078 printf("Failed to register rack module -- err:%d\n", err);
24081 tcp_lro_reg_mbufq();
24082 rack_mod_inited = true;
24085 err = deregister_tcp_functions(&__tcp_rack, true, false);
24088 err = deregister_tcp_functions(&__tcp_rack, false, true);
24091 if (rack_mod_inited) {
24092 uma_zdestroy(rack_zone);
24093 uma_zdestroy(rack_pcb_zone);
24094 sysctl_ctx_free(&rack_sysctl_ctx);
24095 rack_counter_destroy();
24096 rack_mod_inited = false;
24098 tcp_lro_dereg_mbufq();
24102 return (EOPNOTSUPP);
24107 static moduledata_t tcp_rack = {
24108 .name = __XSTRING(MODNAME),
24109 .evhand = tcp_addrack,
24113 MODULE_VERSION(MODNAME, 1);
24114 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
24115 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
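/*
 * For illustration only: once this module is loaded (e.g. kldload
 * tcp_rack), an individual connection can be moved onto the stack with
 * the base stack's TCP_FUNCTION_BLK option, assuming the stack registered
 * under the name "rack" (sketch; "fd" is an assumed TCP socket):
 *
 *	struct tcp_function_set tfs;
 *
 *	memset(&tfs, 0, sizeof(tfs));
 *	strlcpy(tfs.function_set_name, "rack", sizeof(tfs.function_set_name));
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_FUNCTION_BLK,
 *	    &tfs, sizeof(tfs)) == -1)
 *		warn("TCP_FUNCTION_BLK");
 */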
24117 #endif /* #if !defined(INET) && !defined(INET6) */