/*-
 * Copyright (c) 2016-2020 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h>		/* Must come after qmath.h and tree.h */
#else
#include <sys/tree.h>
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/tim_filter.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>
#include <sys/protosw.h>
#ifdef TCP_ACCOUNTING
#include <sys/sched.h>
#include <machine/cpu.h>
#endif
#include <vm/uma.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */
#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_ratelimit.h>
#include <netinet/tcp_accounting.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_newreno.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef NETFLIX_SHARED_CWND
#include <netinet/tcp_shared_cwnd.h>
#endif
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif /* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

#include "sack_filter.h"
#include "tcp_rack.h"
#include "rack_bbr_common.h"
uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define TICKS2SBT(__t)	(tick_sbt * ((sbintime_t)(__t)))
#endif

VNET_DECLARE(uint32_t, newreno_beta);
VNET_DECLARE(uint32_t, newreno_beta_ecn);
#define V_newreno_beta VNET(newreno_beta)
#define V_newreno_beta_ecn VNET(newreno_beta_ecn)

MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block");
MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options");

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;
/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving which slowly drops
 *   the congestion window so that the ack clock can
 *   be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named) that
 *   will stop us using the number of dup acks and instead
 *   use time as the gauge of when we retransmit.
 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
 *   of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports SACK
 * initially and then to assure that the RACK state matches the connection
 * state before calling the state's do_segment function. Each
 * state is simplified due to the fact that the original do_segment
 * has been decomposed and we *know* what state we are in (no
 * switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also over-written with a new version since it
 * must maintain the new rack scoreboard.
 */
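/*
 * A minimal sketch of that dispatch (illustrative only; the actual
 * wiring is done by rack_set_state() and the do_segment path further
 * below): the per-state handler is stored once when the connection
 * state changes, e.g.
 *
 *	rack->r_substate = rack_do_established;
 *
 * and each inbound segment is then handed straight to it, with no
 * switch on tp->t_state and no per-segment SACK checks:
 *
 *	retval = (*rack->r_substate)(m, th, so, tp, &to, drop_hdrlen,
 *	    tlen, tiwin, thflags, nxt_pkt, iptos);
 */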
static int32_t rack_tlp_thresh = 1;
static int32_t rack_tlp_limit = 2;	/* No more than 2 TLPs without new data */
static int32_t rack_tlp_use_greater = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000000;	/* 0 - never fade, def 60,000,000
						 * - 60 seconds */
static uint8_t rack_req_measurements = 1;
/* Attack threshold detections */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;
static int32_t rack_enable_hw_pacing = 0;	/* Due to CCSP keep it off by default */
static int32_t rack_hw_pace_extra_slots = 2;	/* 2 extra MSS time_betweens */
static int32_t rack_hw_rate_caps = 1;	/* 1; */
static int32_t rack_hw_rate_min = 0;	/* 1500000; */
static int32_t rack_hw_rate_to_low = 0;	/* 1200000; */
static int32_t rack_hw_up_only = 1;
static int32_t rack_stats_gets_ms_rtt = 1;
static int32_t rack_prr_addbackmax = 2;

static int32_t rack_pkt_delay = 1000;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1000;	/* Minimum rack timeout in microseconds */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t rack_enable_shared_cwnd = 1;
static int32_t rack_use_cmp_acks = 1;
static int32_t rack_use_fsb = 1;
static int32_t rack_use_rfo = 1;
static int32_t rack_use_rsm_rfo = 1;
static int32_t rack_max_abc_post_recovery = 2;
static int32_t rack_client_low_buf = 0;
#ifdef TCP_ACCOUNTING
static int32_t rack_tcp_accounting = 0;
#endif
static int32_t rack_limits_scwnd = 1;
static int32_t rack_enable_mqueue_for_nonpaced = 0;
static int32_t rack_disable_prr = 0;
static int32_t use_rack_rr = 1;
static int32_t rack_non_rxt_use_cr = 0;	/* does a non-rxt in recovery use the configured rate (ss/ca)? */
static int32_t rack_persist_min = 250000;	/* 250ms in usecs */
static int32_t rack_persist_max = 2000000;	/* 2 seconds in usecs */
static int32_t rack_sack_not_required = 1;	/* set to one to allow non-sack to use rack */
static int32_t rack_default_init_window = 0;	/* Use system default */
static int32_t rack_limit_time_with_srtt = 0;
static int32_t rack_autosndbuf_inc = 20;	/* In percentage form */
static int32_t rack_enobuf_hw_boost_mult = 2;	/* How many time_betweens do we boost the pacing slot by on ENOBUFS */
static int32_t rack_enobuf_hw_max = 12000;	/* 12 ms in usecs */
static int32_t rack_enobuf_hw_min = 10000;	/* 10 ms in usecs */
static int32_t rack_hw_rwnd_factor = 2;	/* How many max_segs the rwnd must be before we hold off sending */
/*
 * Currently regular tcp has a rto_min of 30ms
 * the backoff goes 12 times so that ends up
 * being a total of 122.850 seconds before a
 * connection is killed.
 */
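/*
 * A quick check of that arithmetic (a sketch, assuming the timer simply
 * doubles on each of the 12 backoffs starting from the 30ms minimum):
 *
 *	0.030 * (2^0 + 2^1 + ... + 2^11) = 0.030 * (2^12 - 1) = 122.85 s
 */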
static uint32_t rack_def_data_window = 20;
static uint32_t rack_goal_bdp = 2;
static uint32_t rack_min_srtts = 1;
static uint32_t rack_min_measure_usec = 0;
static int32_t rack_tlp_min = 10000;	/* 10ms */
static int32_t rack_rto_min = 30000;	/* 30,000 usec same as main freebsd */
static int32_t rack_rto_max = 4000000;	/* 4 seconds in usecs */
static const int32_t rack_free_cache = 2;
static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 40000;	/* 40ms in usecs */
static int32_t rack_slot_reduction = 4;
static int32_t rack_wma_divisor = 8;	/* For WMA calculation */
static int32_t rack_cwnd_block_ends_measure = 0;
static int32_t rack_rwnd_block_ends_measure = 0;
static int32_t rack_def_profile = 0;

static int32_t rack_lower_cwnd_at_tlp = 0;
static int32_t rack_limited_retran = 0;
static int32_t rack_always_send_oldest = 0;
static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;

static uint16_t rack_per_of_gp_ss = 250;	/* 250% slow-start */
static uint16_t rack_per_of_gp_ca = 200;	/* 200% congestion-avoidance */
static uint16_t rack_per_of_gp_rec = 200;	/* 200% of bw */

static uint16_t rack_per_of_gp_probertt = 60;	/* 60% of bw */
static uint16_t rack_per_of_gp_lowthresh = 40;	/* 40% is bottom */
static uint16_t rack_per_of_gp_probertt_reduce = 10;	/* 10% reduction */
static uint16_t rack_atexit_prtt_hbp = 130;	/* Clamp to 130% on exit prtt if highly buffered path */
static uint16_t rack_atexit_prtt = 130;	/* Clamp to 130% on exit prtt if non highly buffered path */

static uint32_t rack_max_drain_wait = 2;	/* How many gp srtt's before we give up draining */
static uint32_t rack_must_drain = 1;	/* How many GP srtt's we *must* wait */
static uint32_t rack_probertt_use_min_rtt_entry = 1;	/* Use the min to calculate the goal else gp_srtt */
static uint32_t rack_probertt_use_min_rtt_exit = 0;
static uint32_t rack_probe_rtt_sets_cwnd = 0;
static uint32_t rack_probe_rtt_safety_val = 2000000;	/* No more than 2 sec in probe-rtt */
static uint32_t rack_time_between_probertt = 9600000;	/* 9.6 sec in usecs */
static uint32_t rack_probertt_gpsrtt_cnt_mul = 0;	/* How many srtt periods does probe-rtt last top fraction */
static uint32_t rack_probertt_gpsrtt_cnt_div = 0;	/* How many srtt periods does probe-rtt last bottom fraction */
static uint32_t rack_min_probertt_hold = 40000;	/* Equal to delayed ack time */
static uint32_t rack_probertt_filter_life = 10000000;
static uint32_t rack_probertt_lower_within = 10;
static uint32_t rack_min_rtt_movement = 250000;	/* Must move at least 250ms (in microseconds) to count as a lowering */
static int32_t rack_pace_one_seg = 0;	/* Shall we pace 1 MSS at a time (instead of 2) for low b/w (under ~1.4Mbps)? */
static int32_t rack_probertt_clear_is = 1;
static int32_t rack_max_drain_hbp = 1;	/* Extra drain times gpsrtt for highly buffered paths */
static int32_t rack_hbp_thresh = 3;	/* what is the divisor max_rtt/min_rtt to decide a hbp */

static int32_t rack_max_per_above = 30;	/* When we go to increment stop if above 100+this% */
/* Timely information */
/* Combining these two gives the range of 'no change' to b/w */
/* ie the up/down provide the upper and lower bound */
static int32_t rack_gp_per_bw_mul_up = 2;	/* 2% */
static int32_t rack_gp_per_bw_mul_down = 4;	/* 4% */
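/*
 * For example (an illustrative reading of the defaults above): with
 * mul_up = 2 and mul_down = 4, a new b/w measurement anywhere between
 * 96% and 102% of the previous one falls in the "no change" band and
 * leaves the goodput multipliers alone.
 */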
static int32_t rack_gp_rtt_maxmul = 3;	/* 3 x maxmin */
static int32_t rack_gp_rtt_minmul = 1;	/* minrtt + (minrtt/mindiv) is lower rtt */
static int32_t rack_gp_rtt_mindiv = 4;	/* minrtt + (minrtt * minmul/mindiv) is lower rtt */
static int32_t rack_gp_decrease_per = 20;	/* 20% decrease in multiplier */
static int32_t rack_gp_increase_per = 2;	/* 2% increase in multiplier */
static int32_t rack_per_lower_bound = 50;	/* Don't allow to drop below this multiplier */
static int32_t rack_per_upper_bound_ss = 0;	/* Don't allow SS to grow above this */
static int32_t rack_per_upper_bound_ca = 0;	/* Don't allow CA to grow above this */
static int32_t rack_do_dyn_mul = 0;	/* Are the rack gp multipliers dynamic */
static int32_t rack_gp_no_rec_chg = 1;	/* Prohibit recovery from reducing its multiplier */
static int32_t rack_timely_dec_clear = 6;	/* Do we clear decrement count at a value (6)? */
static int32_t rack_timely_max_push_rise = 3;	/* Three rounds of pushing */
static int32_t rack_timely_max_push_drop = 3;	/* Three rounds of pushing */
static int32_t rack_timely_min_segs = 4;	/* 4 segment minimum */
static int32_t rack_use_max_for_nobackoff = 0;
static int32_t rack_timely_int_timely_only = 0;	/* do interim timely's only use the timely algo (no b/w changes)? */
static int32_t rack_timely_no_stopping = 0;
static int32_t rack_down_raise_thresh = 100;
static int32_t rack_req_segs = 1;
static uint64_t rack_bw_rate_cap = 0;
/* Weird delayed ack mode */
static int32_t rack_use_imac_dack = 0;
/* Rack specific counters */
counter_u64_t rack_badfr;
counter_u64_t rack_badfr_bytes;
counter_u64_t rack_rtm_prr_retran;
counter_u64_t rack_rtm_prr_newdata;
counter_u64_t rack_timestamp_mismatch;
counter_u64_t rack_reorder_seen;
counter_u64_t rack_paced_segments;
counter_u64_t rack_unpaced_segments;
counter_u64_t rack_calc_zero;
counter_u64_t rack_calc_nonzero;
counter_u64_t rack_saw_enobuf;
counter_u64_t rack_saw_enobuf_hw;
counter_u64_t rack_saw_enetunreach;
counter_u64_t rack_per_timer_hole;
counter_u64_t rack_large_ackcmp;
counter_u64_t rack_small_ackcmp;

counter_u64_t rack_adjust_map_bw;

/* Tail loss probe counters */
counter_u64_t rack_tlp_tot;
counter_u64_t rack_tlp_newdata;
counter_u64_t rack_tlp_retran;
counter_u64_t rack_tlp_retran_bytes;
counter_u64_t rack_tlp_retran_fail;
counter_u64_t rack_to_tot;
counter_u64_t rack_to_arm_rack;
counter_u64_t rack_to_arm_tlp;
counter_u64_t rack_hot_alloc;
counter_u64_t rack_to_alloc;
counter_u64_t rack_to_alloc_hard;
counter_u64_t rack_to_alloc_emerg;
counter_u64_t rack_to_alloc_limited;
counter_u64_t rack_alloc_limited_conns;
counter_u64_t rack_split_limited;

#define MAX_NUM_OF_CNTS 13
counter_u64_t rack_proc_comp_ack[MAX_NUM_OF_CNTS];
counter_u64_t rack_multi_single_eq;
counter_u64_t rack_proc_non_comp_ack;

counter_u64_t rack_fto_send;
counter_u64_t rack_fto_rsm_send;
counter_u64_t rack_nfto_resend;
counter_u64_t rack_non_fto_send;
counter_u64_t rack_extended_rfo;

counter_u64_t rack_sack_proc_all;
counter_u64_t rack_sack_proc_short;
counter_u64_t rack_sack_proc_restart;
counter_u64_t rack_sack_attacks_detected;
counter_u64_t rack_sack_attacks_reversed;
counter_u64_t rack_sack_used_next_merge;
counter_u64_t rack_sack_splits;
counter_u64_t rack_sack_used_prev_merge;
counter_u64_t rack_sack_skipped_acked;
counter_u64_t rack_ack_total;
counter_u64_t rack_express_sack;
counter_u64_t rack_sack_total;
counter_u64_t rack_move_none;
counter_u64_t rack_move_some;

counter_u64_t rack_used_tlpmethod;
counter_u64_t rack_used_tlpmethod2;
counter_u64_t rack_enter_tlp_calc;
counter_u64_t rack_input_idle_reduces;
counter_u64_t rack_collapsed_win;
counter_u64_t rack_tlp_does_nada;
counter_u64_t rack_try_scwnd;
counter_u64_t rack_hw_pace_init_fail;
counter_u64_t rack_hw_pace_lost;
counter_u64_t rack_sbsndptr_right;
counter_u64_t rack_sbsndptr_wrong;

/* Temp CPU counters */
counter_u64_t rack_find_high;

counter_u64_t rack_progress_drops;
counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];
#define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))

#define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do {	\
	(tv) = (value) + slop;					\
	if ((u_long)(tv) < (u_long)(tvmin))			\
		(tv) = (tvmin);					\
	if ((u_long)(tv) > (u_long)(tvmax))			\
		(tv) = (tvmax);					\
} while (0)
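/*
 * A hypothetical use of the pair above (patterned on the retransmit
 * timer path): compute the RTO estimate and clamp it into the
 * configured window,
 *
 *	RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
 *	    rack_rto_min, rack_rto_max, slop);
 *
 * which leaves t_rxtcur = max(rack_rto_min, srtt + 4 * rttvar) + slop,
 * bounded below by rack_rto_min and above by rack_rto_max.
 */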
static void
rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);

static int
rack_process_ack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to,
    uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val);
static int
rack_process_data(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static void
rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery);
static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
    uint8_t limit_type);
static struct rack_sendmap *
rack_check_recovery_mode(struct tcpcb *tp,
    uint32_t tsused);
static void
rack_cong_signal(struct tcpcb *tp,
    uint32_t type, uint32_t ack);
static void rack_counter_destroy(void);
static int
rack_ctloutput(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp);
static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
static void
rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override);
static void
rack_do_segment(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint8_t iptos);
static void rack_dtor(void *mem, int32_t size, void *arg);
static void
rack_log_alt_to_to_cancel(struct tcp_rack *rack,
    uint32_t flex1, uint32_t flex2,
    uint32_t flex3, uint32_t flex4,
    uint32_t flex5, uint32_t flex6,
    uint16_t flex7, uint8_t mod);
static void
rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
    uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, struct rack_sendmap *rsm);
static struct rack_sendmap *
rack_find_high_nonack(struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
static int
rack_get_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
static void
rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
    tcp_seq th_ack, int line);
static uint32_t
rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
static int32_t rack_handoff_ok(struct tcpcb *tp);
static int32_t rack_init(struct tcpcb *tp);
static void rack_init_sysctls(void);
static void
rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th, int entered_rec, int dup_ack_struck);
static void
rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
    uint32_t seq_out, uint8_t th_flags, int32_t err, uint64_t ts,
    struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls);

static void
rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
static int32_t rack_output(struct tcpcb *tp);

static uint32_t
rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
    struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
    uint32_t cts, int *moved_two);
static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq);
static void rack_remxt_tmr(struct tcpcb *tp);
static int
rack_set_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
static int32_t rack_stopall(struct tcpcb *tp);
static void
rack_timer_activate(struct tcpcb *tp, uint32_t timer_type,
    uint32_t delta);
static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type);
static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type);
static uint32_t
rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint16_t add_flag);
static void
rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag);
static int32_t
rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
static int
rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_closing(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_established(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_lastack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
struct rack_sendmap *
tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t tsused);
static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
    uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
static void
tcp_rack_partialack(struct tcpcb *tp);
static int
rack_set_profile(struct tcp_rack *rack, int prof);
static void
rack_apply_deferred_options(struct tcp_rack *rack);

int32_t rack_clear_counter = 0;
static void
rack_set_cc_pacing(struct tcp_rack *rack)
{
    struct sockopt sopt;
    struct cc_newreno_opts opt;
    struct newreno old, *ptr;
    struct tcpcb *tp;
    int error;

    if (rack->rc_pacing_cc_set)
        return;

    tp = rack->rc_tp;
    if (tp->cc_algo == NULL) {
        /* Tcb is leaving */
        printf("No cc algorithm?\n");
        return;
    }
    rack->rc_pacing_cc_set = 1;
    if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
        /* Not new-reno we can't play games with beta! */
        return;
    }
    ptr = ((struct newreno *)tp->ccv->cc_data);
    if (CC_ALGO(tp)->ctl_output == NULL) {
        /* Huh, why does new_reno no longer have a set function? */
        printf("no ctl_output for algo:%s\n", tp->cc_algo->name);
        return;
    }
    if (ptr == NULL) {
        /* Just the default values */
        old.beta = V_newreno_beta;
        old.beta_ecn = V_newreno_beta_ecn;
        old.newreno_flags = 0;
    } else {
        old.beta = ptr->beta;
        old.beta_ecn = ptr->beta_ecn;
        old.newreno_flags = ptr->newreno_flags;
    }
    sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
    sopt.sopt_dir = SOPT_SET;
    opt.name = CC_NEWRENO_BETA;
    opt.val = rack->r_ctl.rc_saved_beta.beta;
    error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
    if (error) {
        printf("Error returned by ctl_output %d\n", error);
        return;
    }
    /*
     * Hack alert we need to set in our newreno_flags
     * so that Abe behavior is also applied.
     */
    ((struct newreno *)tp->ccv->cc_data)->newreno_flags = CC_NEWRENO_BETA_ECN;
    opt.name = CC_NEWRENO_BETA_ECN;
    opt.val = rack->r_ctl.rc_saved_beta.beta_ecn;
    error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
    if (error) {
        printf("Error returned by ctl_output %d\n", error);
        return;
    }
    /* Save off the original values for restoral */
    memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));

    if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
        union tcp_log_stackspecific log;
        struct timeval tv;

        ptr = ((struct newreno *)tp->ccv->cc_data);
        memset(&log.u_bbr, 0, sizeof(log.u_bbr));
        log.u_bbr.timeStamp = tcp_get_usecs(&tv);
        if (ptr) {
            log.u_bbr.flex1 = ptr->beta;
            log.u_bbr.flex2 = ptr->beta_ecn;
            log.u_bbr.flex3 = ptr->newreno_flags;
        }
        log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
        log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
        log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags;
        log.u_bbr.flex7 = rack->gp_ready;
        log.u_bbr.flex7 <<= 1;
        log.u_bbr.flex7 |= rack->use_fixed_rate;
        log.u_bbr.flex7 <<= 1;
        log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
        log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
        tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, error,
            0, &log, false, NULL, NULL, 0, &tv);
    }
}
static void
rack_undo_cc_pacing(struct tcp_rack *rack)
{
    struct newreno old, *ptr;
    struct tcpcb *tp;

    if (rack->rc_pacing_cc_set == 0)
        return;
    tp = rack->rc_tp;
    rack->rc_pacing_cc_set = 0;
    if (tp->cc_algo == NULL)
        /* Tcb is leaving */
        return;
    if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
        /* Not new-reno nothing to do! */
        return;
    }
    ptr = ((struct newreno *)tp->ccv->cc_data);
    if (ptr == NULL) {
        /*
         * This happens at rack_fini() if the
         * cc module gets freed on us. In that
         * case we lose our "new" settings but
         * that's ok, since the tcb is going away anyway.
         */
        return;
    }
    /* Grab out our set values */
    memcpy(&old, ptr, sizeof(struct newreno));
    /* Copy back in the original values */
    memcpy(ptr, &rack->r_ctl.rc_saved_beta, sizeof(struct newreno));
    /* Now save back the values we had set in (for when pacing is restored) */
    memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
    if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
        union tcp_log_stackspecific log;
        struct timeval tv;

        ptr = ((struct newreno *)tp->ccv->cc_data);
        memset(&log.u_bbr, 0, sizeof(log.u_bbr));
        log.u_bbr.timeStamp = tcp_get_usecs(&tv);
        log.u_bbr.flex1 = ptr->beta;
        log.u_bbr.flex2 = ptr->beta_ecn;
        log.u_bbr.flex3 = ptr->newreno_flags;
        log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
        log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
        log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags;
        log.u_bbr.flex7 = rack->gp_ready;
        log.u_bbr.flex7 <<= 1;
        log.u_bbr.flex7 |= rack->use_fixed_rate;
        log.u_bbr.flex7 <<= 1;
        log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
        log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
        tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
            0, &log, false, NULL, NULL, 0, &tv);
    }
}
#ifdef NETFLIX_PEAKRATE
static void
rack_update_peakrate_thr(struct tcpcb *tp)
{
    /* Keep in mind that t_maxpeakrate is in B/s. */
    uint64_t peak;

    peak = uqmax((tp->t_maxseg * 2),
        (((uint64_t)tp->t_maxpeakrate * (uint64_t)(tp->t_srtt)) / (uint64_t)HPTS_USEC_IN_SEC));
    tp->t_peakrate_thr = (uint32_t)uqmin(peak, UINT32_MAX);
}
#endif
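/*
 * Worked example for the threshold above (illustrative numbers, and
 * taking t_srtt to be in microseconds here, as the division by
 * HPTS_USEC_IN_SEC implies): with t_maxpeakrate = 12,500,000 B/s
 * (100 Mbps) and t_srtt = 40,000 usec,
 *
 *	12,500,000 * 40,000 / 1,000,000 = 500,000 bytes
 *
 * i.e. about one srtt's worth of data at the peak rate, but never less
 * than two segments.
 */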
static int
sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
{
    uint32_t stat = 0;
    int32_t error;
    int i;

    error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
    if (error || req->newptr == NULL)
        return (error);

    error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
    if (error)
        return (error);
    if (stat == 1) {
#ifdef INVARIANTS
        printf("Clearing RACK counters\n");
#endif
        counter_u64_zero(rack_badfr);
        counter_u64_zero(rack_badfr_bytes);
        counter_u64_zero(rack_rtm_prr_retran);
        counter_u64_zero(rack_rtm_prr_newdata);
        counter_u64_zero(rack_timestamp_mismatch);
        counter_u64_zero(rack_reorder_seen);
        counter_u64_zero(rack_tlp_tot);
        counter_u64_zero(rack_tlp_newdata);
        counter_u64_zero(rack_tlp_retran);
        counter_u64_zero(rack_tlp_retran_bytes);
        counter_u64_zero(rack_tlp_retran_fail);
        counter_u64_zero(rack_to_tot);
        counter_u64_zero(rack_to_arm_rack);
        counter_u64_zero(rack_to_arm_tlp);
        counter_u64_zero(rack_paced_segments);
        counter_u64_zero(rack_calc_zero);
        counter_u64_zero(rack_calc_nonzero);
        counter_u64_zero(rack_unpaced_segments);
        counter_u64_zero(rack_saw_enobuf);
        counter_u64_zero(rack_saw_enobuf_hw);
        counter_u64_zero(rack_saw_enetunreach);
        counter_u64_zero(rack_per_timer_hole);
        counter_u64_zero(rack_large_ackcmp);
        counter_u64_zero(rack_small_ackcmp);
        counter_u64_zero(rack_adjust_map_bw);
        counter_u64_zero(rack_to_alloc_hard);
        counter_u64_zero(rack_to_alloc_emerg);
        counter_u64_zero(rack_sack_proc_all);
        counter_u64_zero(rack_fto_send);
        counter_u64_zero(rack_fto_rsm_send);
        counter_u64_zero(rack_extended_rfo);
        counter_u64_zero(rack_hw_pace_init_fail);
        counter_u64_zero(rack_hw_pace_lost);
        counter_u64_zero(rack_sbsndptr_wrong);
        counter_u64_zero(rack_sbsndptr_right);
        counter_u64_zero(rack_non_fto_send);
        counter_u64_zero(rack_nfto_resend);
        counter_u64_zero(rack_sack_proc_short);
        counter_u64_zero(rack_sack_proc_restart);
        counter_u64_zero(rack_to_alloc);
        counter_u64_zero(rack_to_alloc_limited);
        counter_u64_zero(rack_alloc_limited_conns);
        counter_u64_zero(rack_split_limited);
        for (i = 0; i < MAX_NUM_OF_CNTS; i++) {
            counter_u64_zero(rack_proc_comp_ack[i]);
        }
        counter_u64_zero(rack_multi_single_eq);
        counter_u64_zero(rack_proc_non_comp_ack);
        counter_u64_zero(rack_find_high);
        counter_u64_zero(rack_sack_attacks_detected);
        counter_u64_zero(rack_sack_attacks_reversed);
        counter_u64_zero(rack_sack_used_next_merge);
        counter_u64_zero(rack_sack_used_prev_merge);
        counter_u64_zero(rack_sack_splits);
        counter_u64_zero(rack_sack_skipped_acked);
        counter_u64_zero(rack_ack_total);
        counter_u64_zero(rack_express_sack);
        counter_u64_zero(rack_sack_total);
        counter_u64_zero(rack_move_none);
        counter_u64_zero(rack_move_some);
        counter_u64_zero(rack_used_tlpmethod);
        counter_u64_zero(rack_used_tlpmethod2);
        counter_u64_zero(rack_enter_tlp_calc);
        counter_u64_zero(rack_progress_drops);
        counter_u64_zero(rack_tlp_does_nada);
        counter_u64_zero(rack_try_scwnd);
        counter_u64_zero(rack_collapsed_win);
    }
    rack_clear_counter = 0;
    return (0);
}
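/*
 * Usage sketch (hypothetical OID name): rack_init_sysctls() below wires
 * this handler up as a leaf under the stack's sysctl root, so writing a
 * 1 from userland zeroes every counter above, along the lines of:
 *
 *	sysctl net.inet.tcp.<stack-name>.clear=1
 *
 * The exact OID depends on the name the module registered with.
 */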
static void
rack_init_sysctls(void)
{
    struct sysctl_oid *rack_counters;
    struct sysctl_oid *rack_attack;
    struct sysctl_oid *rack_pacing;
    struct sysctl_oid *rack_timely;
    struct sysctl_oid *rack_timers;
    struct sysctl_oid *rack_tlp;
    struct sysctl_oid *rack_misc;
    struct sysctl_oid *rack_measure;
    struct sysctl_oid *rack_probertt;
    struct sysctl_oid *rack_hw_pacing;

    rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "sack_attack",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Rack Sack Attack Counters and Controls");
    rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "stats",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Rack Counters");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO, "rate_sample_method", CTLFLAG_RW,
        &rack_rate_sample_method, USE_RTT_LOW,
        "What method should we use for rate sampling 0=high, 1=low");
    /* Probe rtt related controls */
    rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "probertt",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "ProbeRTT related Controls");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "exit_per_hpb", CTLFLAG_RW,
        &rack_atexit_prtt_hbp, 130,
        "What percentage above goodput do we clamp CA/SS to at exit on a high-BDP path (default 130%)");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW,
        &rack_atexit_prtt, 130,
        "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path (default 130%)");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "gp_per_mul", CTLFLAG_RW,
        &rack_per_of_gp_probertt, 60,
        "What percentage of goodput do we pace at in probertt");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "gp_per_reduce", CTLFLAG_RW,
        &rack_per_of_gp_probertt_reduce, 10,
        "What percentage of goodput do we reduce every gp_srtt");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "gp_per_low", CTLFLAG_RW,
        &rack_per_of_gp_lowthresh, 40,
        "What percentage of goodput do we allow the multiplier to fall to");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "time_between", CTLFLAG_RW,
        &rack_time_between_probertt, 96000000,
        "How many useconds must pass after the rtt last lowered before we enter probertt");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "safety", CTLFLAG_RW,
        &rack_probe_rtt_safety_val, 2000000,
        "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "sets_cwnd", CTLFLAG_RW,
        &rack_probe_rtt_sets_cwnd, 0,
        "Do we set the cwnd too (if always_lower is on)");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "maxdrainsrtts", CTLFLAG_RW,
        &rack_max_drain_wait, 2,
        "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "mustdrainsrtts", CTLFLAG_RW,
        &rack_must_drain, 1,
        "We must drain this many gp_srtt's waiting for flight to reach goal");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "goal_use_min_entry", CTLFLAG_RW,
        &rack_probertt_use_min_rtt_entry, 1,
        "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "goal_use_min_exit", CTLFLAG_RW,
        &rack_probertt_use_min_rtt_exit, 0,
        "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "length_div", CTLFLAG_RW,
        &rack_probertt_gpsrtt_cnt_div, 0,
        "How many recent goodput srtt periods plus hold time does probertt last (bottom of fraction)");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "length_mul", CTLFLAG_RW,
        &rack_probertt_gpsrtt_cnt_mul, 0,
        "How many recent goodput srtt periods plus hold time does probertt last (top of fraction)");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "holdtim_at_target", CTLFLAG_RW,
        &rack_min_probertt_hold, 200000,
        "What is the minimum time we hold probertt at target");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "filter_life", CTLFLAG_RW,
        &rack_probertt_filter_life, 10000000,
        "What is the time for the filter's life in useconds");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "lower_within", CTLFLAG_RW,
        &rack_probertt_lower_within, 10,
        "If the rtt goes lower within this percentage of the time, go into probe-rtt");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "must_move", CTLFLAG_RW,
        &rack_min_rtt_movement, 250,
        "How much is the minimum movement in rtt to count as a drop for probertt purposes");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "clear_is_cnts", CTLFLAG_RW,
        &rack_probertt_clear_is, 1,
        "Do we clear I/S counts on exiting probe-rtt");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "hbp_extra_drain", CTLFLAG_RW,
        &rack_max_drain_hbp, 1,
        "How many extra drain gpsrtt's do we get in highly buffered paths");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "hbp_threshold", CTLFLAG_RW,
        &rack_hbp_thresh, 3,
        "We are highly buffered if max_rtt_seen / min_rtt_seen > this threshold");
    /* Pacing related sysctls */
    rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "pacing",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Pacing related Controls");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "max_pace_over", CTLFLAG_RW,
        &rack_max_per_above, 30,
        "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "pace_to_one", CTLFLAG_RW,
        &rack_pace_one_seg, 0,
        "Do we allow low b/w pacing of 1MSS instead of two");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "limit_wsrtt", CTLFLAG_RW,
        &rack_limit_time_with_srtt, 0,
        "Do we limit pacing time based on srtt");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "init_win", CTLFLAG_RW,
        &rack_default_init_window, 0,
        "Do we have a rack initial window, 0 = system default");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "gp_per_ss", CTLFLAG_RW,
        &rack_per_of_gp_ss, 250,
        "If non zero, what percentage of goodput to pace at in slow start");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "gp_per_ca", CTLFLAG_RW,
        &rack_per_of_gp_ca, 150,
        "If non zero, what percentage of goodput to pace at in congestion avoidance");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "gp_per_rec", CTLFLAG_RW,
        &rack_per_of_gp_rec, 200,
        "If non zero, what percentage of goodput to pace at in recovery");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "pace_max_seg", CTLFLAG_RW,
        &rack_hptsi_segments, 40,
        "What size is the max for TSO segments in pacing and burst mitigation");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "burst_reduces", CTLFLAG_RW,
        &rack_slot_reduction, 4,
        "When doing only burst mitigation what is the reduce divisor");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO, "use_pacing", CTLFLAG_RW,
        &rack_pace_every_seg, 0,
        "If set we use pacing, if clear we use only the original burst mitigation");
    SYSCTL_ADD_U64(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "rate_cap", CTLFLAG_RW,
        &rack_bw_rate_cap, 0,
        "If set we apply this value to the absolute rate cap used by pacing");
    SYSCTL_ADD_U8(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO, "req_measure_cnt", CTLFLAG_RW,
        &rack_req_measurements, 1,
        "If doing dynamic pacing, how many measurements must be in before we start pacing?");
    /* Hardware pacing */
    rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "hdwr_pacing",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Hardware pacing related Controls");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "rwnd_factor", CTLFLAG_RW,
        &rack_hw_rwnd_factor, 2,
        "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW,
        &rack_enobuf_hw_boost_mult, 2,
        "By how many time_betweens should we boost the pacing time if we see an ENOBUFS?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "pace_enobuf_max", CTLFLAG_RW,
        &rack_enobuf_hw_max, 2,
        "What is the max boost for the pacing time if we see an ENOBUFS?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "pace_enobuf_min", CTLFLAG_RW,
        &rack_enobuf_hw_min, 2,
        "What is the min boost for the pacing time if we see an ENOBUFS?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "enable", CTLFLAG_RW,
        &rack_enable_hw_pacing, 0,
        "Should RACK attempt to use hw pacing?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "rate_cap", CTLFLAG_RW,
        &rack_hw_rate_caps, 1,
        "Does the highest hardware pacing rate cap the rate we will send at?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "rate_min", CTLFLAG_RW,
        &rack_hw_rate_min, 0,
        "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "rate_to_low", CTLFLAG_RW,
        &rack_hw_rate_to_low, 0,
        "If we fall below this rate, dis-engage hw pacing?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "up_only", CTLFLAG_RW,
        &rack_hw_up_only, 1,
        "Do we allow hw pacing to lower the rate selected?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "extra_mss_precise", CTLFLAG_RW,
        &rack_hw_pace_extra_slots, 2,
        "If the rates between software and hardware match precisely how many extra time_betweens do we get?");
    rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "timely",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Rack Timely RTT Controls");
    /* Timely based GP dynamics */
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "upper", CTLFLAG_RW,
        &rack_gp_per_bw_mul_up, 2,
        "Rack timely upper range for equal b/w (in percentage)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "lower", CTLFLAG_RW,
        &rack_gp_per_bw_mul_down, 4,
        "Rack timely lower range for equal b/w (in percentage)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "rtt_max_mul", CTLFLAG_RW,
        &rack_gp_rtt_maxmul, 3,
        "Rack timely multiplier of lowest rtt for rtt_max");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "rtt_min_div", CTLFLAG_RW,
        &rack_gp_rtt_mindiv, 4,
        "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "rtt_min_mul", CTLFLAG_RW,
        &rack_gp_rtt_minmul, 1,
        "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "decrease", CTLFLAG_RW,
        &rack_gp_decrease_per, 20,
        "Rack timely decrease percentage of our GP multiplication factor");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "increase", CTLFLAG_RW,
        &rack_gp_increase_per, 2,
        "Rack timely increase percentage of our GP multiplication factor");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "lowerbound", CTLFLAG_RW,
        &rack_per_lower_bound, 50,
        "Rack timely lowest percentage we allow GP multiplier to fall to");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "upperboundss", CTLFLAG_RW,
        &rack_per_upper_bound_ss, 0,
        "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "upperboundca", CTLFLAG_RW,
        &rack_per_upper_bound_ca, 0,
        "Rack timely highest percentage we allow GP multiplier in CA to raise to (0 is no upperbound)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "dynamicgp", CTLFLAG_RW,
        &rack_do_dyn_mul, 0,
        "Rack timely do we enable dynamic timely goodput by default");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "no_rec_red", CTLFLAG_RW,
        &rack_gp_no_rec_chg, 1,
        "Rack timely do we prohibit the recovery multiplier from being lowered");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "red_clear_cnt", CTLFLAG_RW,
        &rack_timely_dec_clear, 6,
        "Rack timely what threshold do we count to before another boost during b/w descent");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "max_push_rise", CTLFLAG_RW,
        &rack_timely_max_push_rise, 3,
        "Rack timely how many times do we push up with b/w increase");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "max_push_drop", CTLFLAG_RW,
        &rack_timely_max_push_drop, 3,
        "Rack timely how many times do we push back on b/w descent");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "min_segs", CTLFLAG_RW,
        &rack_timely_min_segs, 4,
        "Rack timely when setting the cwnd what is the min num segments");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "noback_max", CTLFLAG_RW,
        &rack_use_max_for_nobackoff, 0,
        "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "interim_timely_only", CTLFLAG_RW,
        &rack_timely_int_timely_only, 0,
        "Rack timely when doing interim timely's do we only do timely (no b/w consideration)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "nonstop", CTLFLAG_RW,
        &rack_timely_no_stopping, 0,
        "Rack timely don't stop increase");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "dec_raise_thresh", CTLFLAG_RW,
        &rack_down_raise_thresh, 100,
        "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "bottom_drag_segs", CTLFLAG_RW,
        &rack_req_segs, 1,
        "Bottom dragging if not these many segments outstanding and room");
    /* TLP and Rack related parameters */
    rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "tlp",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "TLP and Rack related Controls");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "use_rrr", CTLFLAG_RW,
        &use_rack_rr, 1,
        "Do we use Rack Rapid Recovery");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "post_rec_labc", CTLFLAG_RW,
        &rack_max_abc_post_recovery, 2,
        "Since we do early recovery, do we override the l_abc to a value, if so what?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW,
        &rack_non_rxt_use_cr, 0,
        "Do we use ss/ca rate if in recovery we are transmitting a new data chunk");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "tlpmethod", CTLFLAG_RW,
        &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
        "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "limit", CTLFLAG_RW,
        &rack_tlp_limit, 2,
        "How many TLP's can be sent without sending new data");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "use_greater", CTLFLAG_RW,
        &rack_tlp_use_greater, 1,
        "Should we use the rack_rtt time if it's greater than srtt");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "tlpminto", CTLFLAG_RW,
        &rack_tlp_min, 10000,
        "TLP minimum timeout per the specification (in microseconds)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "send_oldest", CTLFLAG_RW,
        &rack_always_send_oldest, 0,
        "Should we always send the oldest TLP and RACK-TLP");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "rack_tlimit", CTLFLAG_RW,
        &rack_limited_retran, 0,
        "How many times can a rack timeout drive out sends");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
        &rack_lower_cwnd_at_tlp, 0,
        "When a TLP completes a retran should we enter recovery");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "reorder_thresh", CTLFLAG_RW,
        &rack_reorder_thresh, 2,
        "What factor for rack will be added when seeing reordering (shift right)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
        &rack_tlp_thresh, 1,
        "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "reorder_fade", CTLFLAG_RW,
        &rack_reorder_fade, 60000000,
        "Does reorder detection fade, if so how many microseconds (0 means never)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "pktdelay", CTLFLAG_RW,
        &rack_pkt_delay, 1000,
        "Extra RACK time (in microseconds) besides reordering thresh");
    /* Timer related controls */
    rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "timers",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Timer related controls");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "persmin", CTLFLAG_RW,
        &rack_persist_min, 250000,
        "What is the minimum time in microseconds between persists");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "persmax", CTLFLAG_RW,
        &rack_persist_max, 2000000,
        "What is the largest delay in microseconds between persists");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "delayed_ack", CTLFLAG_RW,
        &rack_delayed_ack_time, 40000,
        "Delayed ack time (40ms in microseconds)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "minrto", CTLFLAG_RW,
        &rack_rto_min, 30000,
        "Minimum RTO in microseconds -- set with caution below 1000 due to TLP");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "maxrto", CTLFLAG_RW,
        &rack_rto_max, 4000000,
        "Maximum RTO in microseconds -- should be at least as large as min_rto");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "minto", CTLFLAG_RW,
        &rack_min_to, 1000,
        "Minimum rack timeout in microseconds");
    /* Measure controls */
    rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "measure",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Measure related controls");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "wma_divisor", CTLFLAG_RW,
        &rack_wma_divisor, 8,
        "When doing b/w calculation what is the divisor for the WMA");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "end_cwnd", CTLFLAG_RW,
        &rack_cwnd_block_ends_measure, 0,
        "Does a cwnd just-return end the measurement window (app limited)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "end_rwnd", CTLFLAG_RW,
        &rack_rwnd_block_ends_measure, 0,
        "Does an rwnd just-return end the measurement window (app limited -- not persists)");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "min_target", CTLFLAG_RW,
        &rack_def_data_window, 20,
        "What is the minimum target window (in mss) for GP measurements");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "goal_bdp", CTLFLAG_RW,
        &rack_goal_bdp, 2,
        "What is the goal BDP to measure");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "min_srtts", CTLFLAG_RW,
        &rack_min_srtts, 1,
        "What is the minimum number of srtt's a GP measurement must span");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "min_measure_tim", CTLFLAG_RW,
        &rack_min_measure_usec, 0,
        "What is the minimum time for a measurement, if 0 this is off");
    /* Misc rack controls */
    rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "misc",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Misc related controls");
#ifdef TCP_ACCOUNTING
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "tcp_acct", CTLFLAG_RW,
        &rack_tcp_accounting, 0,
        "Should we turn on TCP accounting for all rack sessions?");
#endif
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "prr_addback_max", CTLFLAG_RW,
        &rack_prr_addbackmax, 2,
        "What is the maximum number of MSS we allow to be added back if prr can't send all its data?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "stats_gets_ms", CTLFLAG_RW,
        &rack_stats_gets_ms_rtt, 1,
        "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "clientlowbuf", CTLFLAG_RW,
        &rack_client_low_buf, 0,
        "Client low buffer level (below this we are more aggressive in DGP exiting recovery), 0 = off");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "defprofile", CTLFLAG_RW,
        &rack_def_profile, 0,
        "Should RACK use a default profile (0=no, num == profile num)?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "cmpack", CTLFLAG_RW,
        &rack_use_cmp_acks, 1,
        "Should RACK have LRO send compressed acks");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "fsb", CTLFLAG_RW,
        &rack_use_fsb, 1,
        "Should RACK use the fast send block?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "rfo", CTLFLAG_RW,
        &rack_use_rfo, 1,
        "Should RACK use rack_fast_output()?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "rsmrfo", CTLFLAG_RW,
        &rack_use_rsm_rfo, 1,
        "Should RACK use rack_fast_rsm_output()?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "shared_cwnd", CTLFLAG_RW,
        &rack_enable_shared_cwnd, 1,
        "Should RACK try to use the shared cwnd on connections where allowed");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "limits_on_scwnd", CTLFLAG_RW,
        &rack_limits_scwnd, 1,
        "Should RACK place low end time limits on the shared cwnd feature");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW,
        &rack_enable_mqueue_for_nonpaced, 0,
        "Should RACK use mbuf queuing for non-paced connections");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "iMac_dack", CTLFLAG_RW,
        &rack_use_imac_dack, 0,
        "Should RACK try to emulate iMac delayed ack");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "no_prr", CTLFLAG_RW,
        &rack_disable_prr, 0,
        "Should RACK not use prr and only pace (must have pacing on)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "bb_verbose", CTLFLAG_RW,
        &rack_verbose_logging, 0,
        "Should RACK black box logging be verbose");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "data_after_close", CTLFLAG_RW,
        &rack_ignore_data_after_close, 1,
        "Do we hold off sending a RST until all pending data is ack'd");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "no_sack_needed", CTLFLAG_RW,
        &rack_sack_not_required, 1,
        "Do we allow rack to run on connections not supporting SACK");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "prr_sendalot", CTLFLAG_RW,
        &rack_send_a_lot_in_prr, 1,
        "Send a lot in prr");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "autoscale", CTLFLAG_RW,
        &rack_autosndbuf_inc, 20,
        "What percentage should rack scale up its snd buffer by?");
1466 /* Sack Attacker detection stuff */
1467 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1468 SYSCTL_CHILDREN(rack_attack),
1469 OID_AUTO, "detect_highsackratio", CTLFLAG_RW,
1470 &rack_highest_sack_thresh_seen, 0,
1471 "Highest sack to ack ratio seen");
1472 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1473 SYSCTL_CHILDREN(rack_attack),
1474 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW,
1475 &rack_highest_move_thresh_seen, 0,
1476 "Highest move to non-move ratio seen");
1477 rack_ack_total = counter_u64_alloc(M_WAITOK);
1478 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1479 SYSCTL_CHILDREN(rack_attack),
1480 OID_AUTO, "acktotal", CTLFLAG_RD,
1482 "Total number of Ack's");
1483 rack_express_sack = counter_u64_alloc(M_WAITOK);
1484 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1485 SYSCTL_CHILDREN(rack_attack),
1486 OID_AUTO, "exp_sacktotal", CTLFLAG_RD,
1488 "Total expresss number of Sack's");
1489 rack_sack_total = counter_u64_alloc(M_WAITOK);
1490 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1491 SYSCTL_CHILDREN(rack_attack),
1492 OID_AUTO, "sacktotal", CTLFLAG_RD,
1494 "Total number of SACKs");
1495 rack_move_none = counter_u64_alloc(M_WAITOK);
1496 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1497 SYSCTL_CHILDREN(rack_attack),
1498 OID_AUTO, "move_none", CTLFLAG_RD,
1500 "Total number of SACK index reuse of postions under threshold");
1501 rack_move_some = counter_u64_alloc(M_WAITOK);
1502 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1503 SYSCTL_CHILDREN(rack_attack),
1504 OID_AUTO, "move_some", CTLFLAG_RD,
1506 "Total number of SACK index reuse of postions over threshold");
1507 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK);
1508 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1509 SYSCTL_CHILDREN(rack_attack),
1510 OID_AUTO, "attacks", CTLFLAG_RD,
1511 &rack_sack_attacks_detected,
1512 "Total number of SACK attackers that had sack disabled");
1513 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK);
1514 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1515 SYSCTL_CHILDREN(rack_attack),
1516 OID_AUTO, "reversed", CTLFLAG_RD,
1517 &rack_sack_attacks_reversed,
1518 "Total number of SACK attackers that were later determined false positive");
1519 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK);
1520 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1521 SYSCTL_CHILDREN(rack_attack),
1522 OID_AUTO, "nextmerge", CTLFLAG_RD,
1523 &rack_sack_used_next_merge,
1524 "Total number of times we used the next merge");
1525 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK);
1526 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1527 SYSCTL_CHILDREN(rack_attack),
1528 OID_AUTO, "prevmerge", CTLFLAG_RD,
1529 &rack_sack_used_prev_merge,
1530 "Total number of times we used the prev merge");
1532 rack_fto_send = counter_u64_alloc(M_WAITOK);
1533 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1534 SYSCTL_CHILDREN(rack_counters),
1535 OID_AUTO, "fto_send", CTLFLAG_RD,
1536 &rack_fto_send, "Total number of rack_fast_output sends");
1537 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK);
1538 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1539 SYSCTL_CHILDREN(rack_counters),
1540 OID_AUTO, "fto_rsm_send", CTLFLAG_RD,
1541 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends");
1542 rack_nfto_resend = counter_u64_alloc(M_WAITOK);
1543 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1544 SYSCTL_CHILDREN(rack_counters),
1545 OID_AUTO, "nfto_resend", CTLFLAG_RD,
1546 &rack_nfto_resend, "Total number of rack_output retransmissions");
1547 rack_non_fto_send = counter_u64_alloc(M_WAITOK);
1548 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1549 SYSCTL_CHILDREN(rack_counters),
1550 OID_AUTO, "nfto_send", CTLFLAG_RD,
1551 &rack_non_fto_send, "Total number of rack_output first sends");
1552 rack_extended_rfo = counter_u64_alloc(M_WAITOK);
1553 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1554 SYSCTL_CHILDREN(rack_counters),
1555 OID_AUTO, "rfo_extended", CTLFLAG_RD,
1556 &rack_extended_rfo, "Total number of times we extended rfo");
1558 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK);
1559 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1560 SYSCTL_CHILDREN(rack_counters),
1561 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD,
1562 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing");
1563 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK);
1565 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1566 SYSCTL_CHILDREN(rack_counters),
1567 OID_AUTO, "hwpace_lost", CTLFLAG_RD,
1568 &rack_hw_pace_lost, "Total number of times we failed to initialize hw pacing");
1572 rack_badfr = counter_u64_alloc(M_WAITOK);
1573 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1574 SYSCTL_CHILDREN(rack_counters),
1575 OID_AUTO, "badfr", CTLFLAG_RD,
1576 &rack_badfr, "Total number of bad FRs");
1577 rack_badfr_bytes = counter_u64_alloc(M_WAITOK);
1578 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1579 SYSCTL_CHILDREN(rack_counters),
1580 OID_AUTO, "badfr_bytes", CTLFLAG_RD,
1581 &rack_badfr_bytes, "Total number of bad FRs");
1582 rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK);
1583 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1584 SYSCTL_CHILDREN(rack_counters),
1585 OID_AUTO, "prrsndret", CTLFLAG_RD,
1586 &rack_rtm_prr_retran,
1587 "Total number of prr based retransmits");
1588 rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK);
1589 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1590 SYSCTL_CHILDREN(rack_counters),
1591 OID_AUTO, "prrsndnew", CTLFLAG_RD,
1592 &rack_rtm_prr_newdata,
1593 "Total number of prr based new transmits");
1594 rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK);
1595 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1596 SYSCTL_CHILDREN(rack_counters),
1597 OID_AUTO, "tsnf", CTLFLAG_RD,
1598 &rack_timestamp_mismatch,
1599 "Total number of timestamps that we could not find the reported ts");
1600 rack_find_high = counter_u64_alloc(M_WAITOK);
1601 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1602 SYSCTL_CHILDREN(rack_counters),
1603 OID_AUTO, "findhigh", CTLFLAG_RD,
1605 "Total number of FIN causing find-high");
1606 rack_reorder_seen = counter_u64_alloc(M_WAITOK);
1607 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1608 SYSCTL_CHILDREN(rack_counters),
1609 OID_AUTO, "reordering", CTLFLAG_RD,
1611 "Total number of times we added delay due to reordering");
1612 rack_tlp_tot = counter_u64_alloc(M_WAITOK);
1613 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1614 SYSCTL_CHILDREN(rack_counters),
1615 OID_AUTO, "tlp_to_total", CTLFLAG_RD,
1617 "Total number of tail loss probe expirations");
1618 rack_tlp_newdata = counter_u64_alloc(M_WAITOK);
1619 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1620 SYSCTL_CHILDREN(rack_counters),
1621 OID_AUTO, "tlp_new", CTLFLAG_RD,
1623 "Total number of tail loss probe sending new data");
1624 rack_tlp_retran = counter_u64_alloc(M_WAITOK);
1625 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1626 SYSCTL_CHILDREN(rack_counters),
1627 OID_AUTO, "tlp_retran", CTLFLAG_RD,
1629 "Total number of tail loss probe sending retransmitted data");
1630 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK);
1631 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1632 SYSCTL_CHILDREN(rack_counters),
1633 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD,
1634 &rack_tlp_retran_bytes,
1635 "Total bytes of tail loss probe sending retransmitted data");
1636 rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK);
1637 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1638 SYSCTL_CHILDREN(rack_counters),
1639 OID_AUTO, "tlp_retran_fail", CTLFLAG_RD,
1640 &rack_tlp_retran_fail,
1641 "Total number of tail loss probe sending retransmitted data that failed (wait for t3)");
1642 rack_to_tot = counter_u64_alloc(M_WAITOK);
1643 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1644 SYSCTL_CHILDREN(rack_counters),
1645 OID_AUTO, "rack_to_tot", CTLFLAG_RD,
1647 "Total number of times the rack to expired");
1648 rack_to_arm_rack = counter_u64_alloc(M_WAITOK);
1649 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1650 SYSCTL_CHILDREN(rack_counters),
1651 OID_AUTO, "arm_rack", CTLFLAG_RD,
1653 "Total number of times the rack timer armed");
1654 rack_to_arm_tlp = counter_u64_alloc(M_WAITOK);
1655 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1656 SYSCTL_CHILDREN(rack_counters),
1657 OID_AUTO, "arm_tlp", CTLFLAG_RD,
1659 "Total number of times the tlp timer armed");
1660 rack_calc_zero = counter_u64_alloc(M_WAITOK);
1661 rack_calc_nonzero = counter_u64_alloc(M_WAITOK);
1662 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1663 SYSCTL_CHILDREN(rack_counters),
1664 OID_AUTO, "calc_zero", CTLFLAG_RD,
1666 "Total number of times pacing time worked out to zero");
1667 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1668 SYSCTL_CHILDREN(rack_counters),
1669 OID_AUTO, "calc_nonzero", CTLFLAG_RD,
1671 "Total number of times pacing time worked out to non-zero");
1672 rack_paced_segments = counter_u64_alloc(M_WAITOK);
1673 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1674 SYSCTL_CHILDREN(rack_counters),
1675 OID_AUTO, "paced", CTLFLAG_RD,
1676 &rack_paced_segments,
1677 "Total number of times a segment send caused hptsi");
1678 rack_unpaced_segments = counter_u64_alloc(M_WAITOK);
1679 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1680 SYSCTL_CHILDREN(rack_counters),
1681 OID_AUTO, "unpaced", CTLFLAG_RD,
1682 &rack_unpaced_segments,
1683 "Total number of times a segment did not cause hptsi");
1684 rack_saw_enobuf = counter_u64_alloc(M_WAITOK);
1685 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1686 SYSCTL_CHILDREN(rack_counters),
1687 OID_AUTO, "saw_enobufs", CTLFLAG_RD,
1689 "Total number of times a sends returned enobuf for non-hdwr paced connections");
1690 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK);
1691 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1692 SYSCTL_CHILDREN(rack_counters),
1693 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD,
1694 &rack_saw_enobuf_hw,
1695 "Total number of times a send returned enobuf for hdwr paced connections");
1696 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK);
1697 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1698 SYSCTL_CHILDREN(rack_counters),
1699 OID_AUTO, "saw_enetunreach", CTLFLAG_RD,
1700 &rack_saw_enetunreach,
1701 "Total number of times a send received a enetunreachable");
1702 rack_hot_alloc = counter_u64_alloc(M_WAITOK);
1703 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1704 SYSCTL_CHILDREN(rack_counters),
1705 OID_AUTO, "alloc_hot", CTLFLAG_RD,
1707 "Total allocations from the top of our list");
1708 rack_to_alloc = counter_u64_alloc(M_WAITOK);
1709 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1710 SYSCTL_CHILDREN(rack_counters),
1711 OID_AUTO, "allocs", CTLFLAG_RD,
1713 "Total allocations of tracking structures");
1714 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK);
1715 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1716 SYSCTL_CHILDREN(rack_counters),
1717 OID_AUTO, "allochard", CTLFLAG_RD,
1718 &rack_to_alloc_hard,
1719 "Total allocations done with sleeping the hard way");
1720 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK);
1721 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1722 SYSCTL_CHILDREN(rack_counters),
1723 OID_AUTO, "allocemerg", CTLFLAG_RD,
1724 &rack_to_alloc_emerg,
1725 "Total allocations done from emergency cache");
1726 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK);
1727 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1728 SYSCTL_CHILDREN(rack_counters),
1729 OID_AUTO, "alloc_limited", CTLFLAG_RD,
1730 &rack_to_alloc_limited,
1731 "Total allocations dropped due to limit");
1732 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK);
1733 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1734 SYSCTL_CHILDREN(rack_counters),
1735 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD,
1736 &rack_alloc_limited_conns,
1737 "Connections with allocations dropped due to limit");
1738 rack_split_limited = counter_u64_alloc(M_WAITOK);
1739 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1740 SYSCTL_CHILDREN(rack_counters),
1741 OID_AUTO, "split_limited", CTLFLAG_RD,
1742 &rack_split_limited,
1743 "Split allocations dropped due to limit");
1745 for (i = 0; i < MAX_NUM_OF_CNTS; i++) {
1747 sprintf(name, "cmp_ack_cnt_%d", i);
1748 rack_proc_comp_ack[i] = counter_u64_alloc(M_WAITOK);
1749 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1750 SYSCTL_CHILDREN(rack_counters),
1751 OID_AUTO, name, CTLFLAG_RD,
1752 &rack_proc_comp_ack[i],
1753 "Number of compressed acks we processed");
1755 rack_large_ackcmp = counter_u64_alloc(M_WAITOK);
1756 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1757 SYSCTL_CHILDREN(rack_counters),
1758 OID_AUTO, "cmp_large_mbufs", CTLFLAG_RD,
1760 "Number of TCP connections with large mbuf's for compressed acks");
1761 rack_small_ackcmp = counter_u64_alloc(M_WAITOK);
1762 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1763 SYSCTL_CHILDREN(rack_counters),
1764 OID_AUTO, "cmp_small_mbufs", CTLFLAG_RD,
1766 "Number of TCP connections with small mbuf's for compressed acks");
1768 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK);
1769 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1770 SYSCTL_CHILDREN(rack_counters),
1771 OID_AUTO, "map_adjust_req", CTLFLAG_RD,
1772 &rack_adjust_map_bw,
1773 "Number of times we hit the case where the sb went up and down on a sendmap entry");
1775 rack_multi_single_eq = counter_u64_alloc(M_WAITOK);
1776 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1777 SYSCTL_CHILDREN(rack_counters),
1778 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD,
1779 &rack_multi_single_eq,
1780 "Number of compressed acks total represented");
1781 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK);
1782 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1783 SYSCTL_CHILDREN(rack_counters),
1784 OID_AUTO, "cmp_ack_not", CTLFLAG_RD,
1785 &rack_proc_non_comp_ack,
1786 "Number of non compresseds acks that we processed");
1789 rack_sack_proc_all = counter_u64_alloc(M_WAITOK);
1790 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1791 SYSCTL_CHILDREN(rack_counters),
1792 OID_AUTO, "sack_long", CTLFLAG_RD,
1793 &rack_sack_proc_all,
1794 "Total times we had to walk whole list for sack processing");
1795 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK);
1796 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1797 SYSCTL_CHILDREN(rack_counters),
1798 OID_AUTO, "sack_restart", CTLFLAG_RD,
1799 &rack_sack_proc_restart,
1800 "Total times we had to walk whole list due to a restart");
1801 rack_sack_proc_short = counter_u64_alloc(M_WAITOK);
1802 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1803 SYSCTL_CHILDREN(rack_counters),
1804 OID_AUTO, "sack_short", CTLFLAG_RD,
1805 &rack_sack_proc_short,
1806 "Total times we took shortcut for sack processing");
1807 rack_enter_tlp_calc = counter_u64_alloc(M_WAITOK);
1808 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1809 SYSCTL_CHILDREN(rack_counters),
1810 OID_AUTO, "tlp_calc_entered", CTLFLAG_RD,
1811 &rack_enter_tlp_calc,
1812 "Total times we called calc-tlp");
1813 rack_used_tlpmethod = counter_u64_alloc(M_WAITOK);
1814 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1815 SYSCTL_CHILDREN(rack_counters),
1816 OID_AUTO, "hit_tlp_method", CTLFLAG_RD,
1817 &rack_used_tlpmethod,
1818 "Total number of runt sacks");
1819 rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK);
1820 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1821 SYSCTL_CHILDREN(rack_counters),
1822 OID_AUTO, "hit_tlp_method2", CTLFLAG_RD,
1823 &rack_used_tlpmethod2,
1824 "Total number of times we hit TLP method 2");
1825 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK);
1826 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1827 SYSCTL_CHILDREN(rack_attack),
1828 OID_AUTO, "skipacked", CTLFLAG_RD,
1829 &rack_sack_skipped_acked,
1830 "Total number of times we skipped previously sacked");
1831 rack_sack_splits = counter_u64_alloc(M_WAITOK);
1832 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1833 SYSCTL_CHILDREN(rack_attack),
1834 OID_AUTO, "ofsplit", CTLFLAG_RD,
1836 "Total number of times we did the old fashion tree split");
1837 rack_progress_drops = counter_u64_alloc(M_WAITOK);
1838 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1839 SYSCTL_CHILDREN(rack_counters),
1840 OID_AUTO, "prog_drops", CTLFLAG_RD,
1841 &rack_progress_drops,
1842 "Total number of progress drops");
1843 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK);
1844 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1845 SYSCTL_CHILDREN(rack_counters),
1846 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD,
1847 &rack_input_idle_reduces,
1848 "Total number of idle reductions on input");
1849 rack_collapsed_win = counter_u64_alloc(M_WAITOK);
1850 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1851 SYSCTL_CHILDREN(rack_counters),
1852 OID_AUTO, "collapsed_win", CTLFLAG_RD,
1853 &rack_collapsed_win,
1854 "Total number of collapsed windows");
1855 rack_tlp_does_nada = counter_u64_alloc(M_WAITOK);
1856 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1857 SYSCTL_CHILDREN(rack_counters),
1858 OID_AUTO, "tlp_nada", CTLFLAG_RD,
1859 &rack_tlp_does_nada,
1860 "Total number of nada tlp calls");
1861 rack_try_scwnd = counter_u64_alloc(M_WAITOK);
1862 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1863 SYSCTL_CHILDREN(rack_counters),
1864 OID_AUTO, "tried_scwnd", CTLFLAG_RD,
1866 "Total number of scwnd attempts");
1868 rack_per_timer_hole = counter_u64_alloc(M_WAITOK);
1869 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1870 SYSCTL_CHILDREN(rack_counters),
1871 OID_AUTO, "timer_hole", CTLFLAG_RD,
1872 &rack_per_timer_hole,
1873 "Total persists start in timer hole");
1875 rack_sbsndptr_wrong = counter_u64_alloc(M_WAITOK);
1876 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1877 SYSCTL_CHILDREN(rack_counters),
1878 OID_AUTO, "sndptr_wrong", CTLFLAG_RD,
1879 &rack_sbsndptr_wrong, "Total number of times the saved sbsndptr was incorret");
1880 rack_sbsndptr_right = counter_u64_alloc(M_WAITOK);
1881 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1882 SYSCTL_CHILDREN(rack_counters),
1883 OID_AUTO, "sndptr_right", CTLFLAG_RD,
1884 &rack_sbsndptr_right, "Total number of times the saved sbsndptr was corret");
1886 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK);
1887 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1888 OID_AUTO, "outsize", CTLFLAG_RD,
1889 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes");
1890 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK);
1891 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1892 OID_AUTO, "opts", CTLFLAG_RD,
1893 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats");
1894 SYSCTL_ADD_PROC(&rack_sysctl_ctx,
1895 SYSCTL_CHILDREN(rack_sysctl_root),
1896 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1897 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters");
1900 static int32_t
1901 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a)
1902 {
1903 if (SEQ_GEQ(b->r_start, a->r_start) &&
1904 SEQ_LT(b->r_start, a->r_end)) {
1905 /*
1906 * The entry b is within the
1907 * block a. i.e.:
1908 * a -- |-------------|
1913 * b --      |-----------|
1914 */
1915 return (0);
1916 } else if (SEQ_GEQ(b->r_start, a->r_end)) {
1917 /*
1918 * b falls as either the next
1919 * sequence block after a so a
1920 * is said to be smaller than b.
1926 */
1927 return (1);
1928 }
1929 /*
1930 * What's left is where a is
1931 * larger than b. i.e:
1935 * b -- |--------------|
1936 */
1937 return (-1);
1938 }
1940 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);
1941 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);
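/*
 * A worked example of the ordering rb_map_cmp() imposes (illustrative
 * numbers): given an entry a covering [100, 200), an entry b starting
 * at 150 compares equal (overlap), one starting at 200 or beyond
 * compares greater, and one starting below 100 compares less. An
 * RB_FIND keyed by a single sequence number therefore lands on the
 * sendmap entry whose range contains that sequence.
 */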
1943 static uint32_t
1944 rc_init_window(struct tcp_rack *rack)
1945 {
1946 uint32_t win;
1947 
1948 if (rack->rc_init_win == 0) {
1949 /*
1950 * Nothing set by the user, use the system stack
1951 * default.
1952 */
1953 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)));
1954 }
1955 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win;
1956 return (win);
1957 }
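/*
 * For example (illustrative numbers): with a 1448-byte fixed maxseg
 * and rc_init_win set to 20, rc_init_window() returns 28960 bytes,
 * while rc_init_win of 0 defers entirely to tcp_compute_initwnd().
 */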
1959 static uint64_t
1960 rack_get_fixed_pacing_bw(struct tcp_rack *rack)
1961 {
1962 if (IN_FASTRECOVERY(rack->rc_tp->t_flags))
1963 return (rack->r_ctl.rc_fixed_pacing_rate_rec);
1964 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
1965 return (rack->r_ctl.rc_fixed_pacing_rate_ss);
1966 else
1967 return (rack->r_ctl.rc_fixed_pacing_rate_ca);
1968 }
1970 static uint64_t
1971 rack_get_bw(struct tcp_rack *rack)
1972 {
1973 if (rack->use_fixed_rate) {
1974 /* Return the fixed pacing rate */
1975 return (rack_get_fixed_pacing_bw(rack));
1976 }
1977 if (rack->r_ctl.gp_bw == 0) {
1978 /*
1979 * We have yet no b/w measurement,
1980 * if we have a user set initial bw
1981 * return it. If we don't have that and
1982 * we have an srtt, use the tcp IW (10) to
1983 * calculate a fictional b/w over the SRTT
1984 * which is more or less a guess. Note
1985 * we don't use our IW from rack on purpose
1986 * so if we have like IW=30, we are not
1987 * calculating a "huge" b/w.
1988 */
1989 uint64_t bw, srtt;
1990 if (rack->r_ctl.init_rate)
1991 return (rack->r_ctl.init_rate);
1992 
1993 /* Has the user set a max peak rate? */
1994 #ifdef NETFLIX_PEAKRATE
1995 if (rack->rc_tp->t_maxpeakrate)
1996 return (rack->rc_tp->t_maxpeakrate);
1997 #endif
1998 /* Ok lets come up with the IW guess, if we have a srtt */
1999 if (rack->rc_tp->t_srtt == 0) {
2000 /*
2001 * Go with old pacing method
2002 * i.e. burst mitigation only.
2003 */
2004 return (0);
2005 }
2006 /* Ok lets get the initial TCP win (not racks) */
2007 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp));
2008 srtt = (uint64_t)rack->rc_tp->t_srtt;
2009 bw *= (uint64_t)USECS_IN_SECOND;
2010 bw /= srtt;
2011 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap))
2012 bw = rack->r_ctl.bw_rate_cap;
2013 return (bw);
2014 } else {
2015 uint64_t bw;
2016 
2017 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
2018 /* Averaging is done, we can return the value */
2019 bw = rack->r_ctl.gp_bw;
2020 } else {
2021 /* Still doing initial average must calculate */
2022 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_measurements;
2023 }
2024 #ifdef NETFLIX_PEAKRATE
2025 if ((rack->rc_tp->t_maxpeakrate) &&
2026 (bw > rack->rc_tp->t_maxpeakrate)) {
2027 /* The user has set a peak rate to pace at
2028 * don't allow us to pace faster than that.
2029 */
2030 return (rack->rc_tp->t_maxpeakrate);
2031 }
2032 #endif
2033 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap))
2034 bw = rack->r_ctl.bw_rate_cap;
2035 return (bw);
2036 }
2037 }
2039 static uint8_t
2040 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm)
2041 {
2042 if (rack->use_fixed_rate) {
2043 return (100);
2044 } else if (rack->in_probe_rtt && (rsm == NULL))
2045 return (rack->r_ctl.rack_per_of_gp_probertt);
2046 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) &&
2047 rack->r_ctl.rack_per_of_gp_rec)) {
2048 if (rsm) {
2049 /* a retransmission always use the recovery rate */
2050 return (rack->r_ctl.rack_per_of_gp_rec);
2051 } else if (rack->rack_rec_nonrxt_use_cr) {
2052 /* Directed to use the configured rate */
2053 goto configured_rate;
2054 } else if (rack->rack_no_prr &&
2055 (rack->r_ctl.rack_per_of_gp_rec > 100)) {
2056 /* No PRR, lets just use the b/w estimate only */
2057 return (100);
2058 } else {
2059 /*
2060 * Here we may have a non-retransmit but we
2061 * have no overrides, so just use the recovery
2062 * rate (prr is in effect).
2063 */
2064 return (rack->r_ctl.rack_per_of_gp_rec);
2065 }
2066 }
2067 configured_rate:
2068 /* For the configured rate we look at our cwnd vs the ssthresh */
2069 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
2070 return (rack->r_ctl.rack_per_of_gp_ss);
2071 else
2072 return (rack->r_ctl.rack_per_of_gp_ca);
2073 }
2075 static void
2076 rack_log_hdwr_pacing(struct tcp_rack *rack,
2077 uint64_t rate, uint64_t hw_rate, int line,
2078 int error, uint16_t mod)
2079 {
2080 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2081 union tcp_log_stackspecific log;
2082 struct timeval tv;
2083 const struct ifnet *ifp;
2085 memset(&log, 0, sizeof(log));
2086 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff);
2087 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff);
2088 if (rack->r_ctl.crte) {
2089 ifp = rack->r_ctl.crte->ptbl->rs_ifp;
2090 } else if (rack->rc_inp->inp_route.ro_nh &&
2091 rack->rc_inp->inp_route.ro_nh->nh_ifp) {
2092 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp;
2096 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff);
2097 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff);
2099 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2100 log.u_bbr.bw_inuse = rate;
2101 log.u_bbr.flex5 = line;
2102 log.u_bbr.flex6 = error;
2103 log.u_bbr.flex7 = mod;
2104 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs;
2105 log.u_bbr.flex8 = rack->use_fixed_rate;
2106 log.u_bbr.flex8 <<= 1;
2107 log.u_bbr.flex8 |= rack->rack_hdrw_pacing;
2108 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
2109 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate;
2110 if (rack->r_ctl.crte)
2111 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate;
2112 else
2113 log.u_bbr.cur_del_rate = 0;
2114 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req;
2115 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2116 &rack->rc_inp->inp_socket->so_rcv,
2117 &rack->rc_inp->inp_socket->so_snd,
2118 BBR_LOG_HDWR_PACE, 0,
2119 0, &log, false, &tv);
2124 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped)
2125 {
2126 /*
2127 * We allow rack_per_of_gp_xx to dictate our bw rate we want.
2128 */
2129 uint64_t bw_est, high_rate;
2130 uint64_t gain;
2131 
2132 gain = (uint64_t)rack_get_output_gain(rack, rsm);
2133 bw_est = bw * gain;
2134 bw_est /= (uint64_t)100;
2135 /* Never fall below the minimum (def 64kbps) */
2136 if (bw_est < RACK_MIN_BW)
2137 bw_est = RACK_MIN_BW;
2138 if (rack->r_rack_hw_rate_caps) {
2139 /* Rate caps are in place */
2140 if (rack->r_ctl.crte != NULL) {
2141 /* We have a hdwr rate already */
2142 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte);
2143 if (bw_est >= high_rate) {
2144 /* We are capping bw at the highest rate table entry */
2145 rack_log_hdwr_pacing(rack,
2146 bw_est, high_rate, __LINE__,
2147 0, 3);
2148 bw_est = high_rate;
2149 if (capped)
2150 *capped = 1;
2151 }
2152 } else if ((rack->rack_hdrw_pacing == 0) &&
2153 (rack->rack_hdw_pace_ena) &&
2154 (rack->rack_attempt_hdwr_pace == 0) &&
2155 (rack->rc_inp->inp_route.ro_nh != NULL) &&
2156 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
2158 * Special case, we have not yet attempted hardware
2159 * pacing, and yet we may, when we do, find out if we are
2160 * above the highest rate. We need to know the maxbw for the interface
2161 * in question (if it supports ratelimiting). We get back
2162 * a 0, if the interface is not found in the RL lists.
2163 */
2164 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp);
2165 if (high_rate) {
2166 /* Yep, we have a rate is it above this rate? */
2167 if (bw_est > high_rate) {
2168 bw_est = high_rate;
2169 if (capped)
2170 *capped = 1;
2171 }
2172 }
2173 }
2174 }
2175 return (bw_est);
2176 }
2177 
2178 static void
2179 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod)
2180 {
2181 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2182 union tcp_log_stackspecific log;
2183 struct timeval tv;
2184 
2185 if ((mod != 1) && (rack_verbose_logging == 0)) {
2187 * We get 3 values currently for mod
2188 * 1 - We are retransmitting and this tells the reason.
2189 * 2 - We are clearing a dup-ack count.
2190 * 3 - We are incrementing a dup-ack count.
2192 * The clear/increment are only logged
2193 * if you have BBverbose on.
2194 */
2195 return;
2196 }
2197 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2198 log.u_bbr.flex1 = tsused;
2199 log.u_bbr.flex2 = thresh;
2200 log.u_bbr.flex3 = rsm->r_flags;
2201 log.u_bbr.flex4 = rsm->r_dupack;
2202 log.u_bbr.flex5 = rsm->r_start;
2203 log.u_bbr.flex6 = rsm->r_end;
2204 log.u_bbr.flex8 = mod;
2205 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2206 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2207 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2208 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2209 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2210 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2211 log.u_bbr.pacing_gain = rack->r_must_retran;
2212 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2213 &rack->rc_inp->inp_socket->so_rcv,
2214 &rack->rc_inp->inp_socket->so_snd,
2215 BBR_LOG_SETTINGS_CHG, 0,
2216 0, &log, false, &tv);
2221 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
2223 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2224 union tcp_log_stackspecific log;
2225 struct timeval tv;
2226 
2227 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2228 log.u_bbr.flex1 = rack->rc_tp->t_srtt;
2229 log.u_bbr.flex2 = to;
2230 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
2231 log.u_bbr.flex4 = slot;
2232 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot;
2233 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2234 log.u_bbr.flex7 = rack->rc_in_persist;
2235 log.u_bbr.flex8 = which;
2236 if (rack->rack_no_prr)
2237 log.u_bbr.pkts_out = 0;
2239 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
2240 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2241 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2242 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2243 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2244 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2245 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2246 log.u_bbr.pacing_gain = rack->r_must_retran;
2247 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift;
2248 log.u_bbr.lost = rack_rto_min;
2249 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2250 &rack->rc_inp->inp_socket->so_rcv,
2251 &rack->rc_inp->inp_socket->so_snd,
2252 BBR_LOG_TIMERSTAR, 0,
2253 0, &log, false, &tv);
2258 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm)
2260 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2261 union tcp_log_stackspecific log;
2262 struct timeval tv;
2263 
2264 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2265 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2266 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2267 log.u_bbr.flex8 = to_num;
2268 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt;
2269 log.u_bbr.flex2 = rack->rc_rack_rtt;
2271 log.u_bbr.flex3 = 0;
2273 log.u_bbr.flex3 = rsm->r_end - rsm->r_start;
2274 if (rack->rack_no_prr)
2275 log.u_bbr.flex5 = 0;
2277 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2278 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2279 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2280 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2281 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2282 log.u_bbr.pacing_gain = rack->r_must_retran;
2283 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2284 &rack->rc_inp->inp_socket->so_rcv,
2285 &rack->rc_inp->inp_socket->so_snd,
2287 0, &log, false, &tv);
2292 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack,
2293 struct rack_sendmap *prev,
2294 struct rack_sendmap *rsm,
2295 struct rack_sendmap *next,
2296 int flag, uint32_t th_ack, int line)
2297 {
2298 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
2299 union tcp_log_stackspecific log;
2300 struct timeval tv;
2301 
2302 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2303 log.u_bbr.flex8 = flag;
2304 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2305 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2306 log.u_bbr.cur_del_rate = (uint64_t)prev;
2307 log.u_bbr.delRate = (uint64_t)rsm;
2308 log.u_bbr.rttProp = (uint64_t)next;
2309 log.u_bbr.flex7 = 0;
2310 if (prev) {
2311 log.u_bbr.flex1 = prev->r_start;
2312 log.u_bbr.flex2 = prev->r_end;
2313 log.u_bbr.flex7 |= 0x4;
2314 }
2315 if (rsm) {
2316 log.u_bbr.flex3 = rsm->r_start;
2317 log.u_bbr.flex4 = rsm->r_end;
2318 log.u_bbr.flex7 |= 0x2;
2319 }
2320 if (next) {
2321 log.u_bbr.flex5 = next->r_start;
2322 log.u_bbr.flex6 = next->r_end;
2323 log.u_bbr.flex7 |= 0x1;
2324 }
2325 log.u_bbr.applimited = line;
2326 log.u_bbr.pkts_out = th_ack;
2327 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2328 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2329 if (rack->rack_no_prr)
2332 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt;
2333 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2334 &rack->rc_inp->inp_socket->so_rcv,
2335 &rack->rc_inp->inp_socket->so_snd,
2337 0, &log, false, &tv);
2342 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len,
2343 struct rack_sendmap *rsm, int conf)
2345 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
2346 union tcp_log_stackspecific log;
2347 struct timeval tv;
2348 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2349 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2350 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2351 log.u_bbr.flex1 = t;
2352 log.u_bbr.flex2 = len;
2353 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt;
2354 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest;
2355 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest;
2356 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt;
2357 log.u_bbr.flex7 = conf;
2358 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot;
2359 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method;
2360 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2361 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt;
2362 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags;
2363 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2364 if (rsm) {
2365 log.u_bbr.pkt_epoch = rsm->r_start;
2366 log.u_bbr.lost = rsm->r_end;
2367 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt;
2368 log.u_bbr.pacing_gain = rsm->r_flags;
2371 log.u_bbr.pkt_epoch = rack->rc_tp->iss;
2373 log.u_bbr.cwnd_gain = 0;
2374 log.u_bbr.pacing_gain = 0;
2376 /* Write out general bits of interest rrs here */
2377 log.u_bbr.use_lt_bw = rack->rc_highly_buffered;
2378 log.u_bbr.use_lt_bw <<= 1;
2379 log.u_bbr.use_lt_bw |= rack->forced_ack;
2380 log.u_bbr.use_lt_bw <<= 1;
2381 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul;
2382 log.u_bbr.use_lt_bw <<= 1;
2383 log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
2384 log.u_bbr.use_lt_bw <<= 1;
2385 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
2386 log.u_bbr.use_lt_bw <<= 1;
2387 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
2388 log.u_bbr.use_lt_bw <<= 1;
2389 log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
2390 log.u_bbr.use_lt_bw <<= 1;
2391 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom;
2392 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight;
2393 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts;
2394 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered;
2395 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts;
2396 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt;
2397 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
2398 log.u_bbr.bw_inuse <<= 32;
2400 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]);
2401 TCP_LOG_EVENTP(tp, NULL,
2402 &rack->rc_inp->inp_socket->so_rcv,
2403 &rack->rc_inp->inp_socket->so_snd,
2405 0, &log, false, &tv);
2412 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt)
2415 * Log the rtt sample we are
2416 * applying to the srtt algorithm in
2417 * useconds.
2418 */
2419 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2420 union tcp_log_stackspecific log;
2421 struct timeval tv;
2422 
2423 /* Convert our ms to a microsecond */
2424 memset(&log, 0, sizeof(log));
2425 log.u_bbr.flex1 = rtt;
2426 log.u_bbr.flex2 = rack->r_ctl.ack_count;
2427 log.u_bbr.flex3 = rack->r_ctl.sack_count;
2428 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
2429 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra;
2430 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2431 log.u_bbr.flex7 = 1;
2432 log.u_bbr.flex8 = rack->sack_attack_disable;
2433 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2434 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2435 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2436 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2437 log.u_bbr.pacing_gain = rack->r_must_retran;
2439 * We capture in delRate the upper 32 bits as
2440 * the confidence level we had declared, and the
2441 * lower 32 bits as the actual RTT using the arrival
2444 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence;
2445 log.u_bbr.delRate <<= 32;
2446 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt;
2447 /* Lets capture all the things that make up t_rxtcur */
2448 log.u_bbr.applimited = rack_rto_min;
2449 log.u_bbr.epoch = rack_rto_max;
2450 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop;
2451 log.u_bbr.lost = rack_rto_min;
2452 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop);
2453 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp);
2454 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec;
2455 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC;
2456 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec;
2457 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2458 &rack->rc_inp->inp_socket->so_rcv,
2459 &rack->rc_inp->inp_socket->so_snd,
2461 0, &log, false, &tv);
2466 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where)
2468 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
2469 union tcp_log_stackspecific log;
2470 struct timeval tv;
2471 
2472 /* Convert our ms to a microsecond */
2473 memset(&log, 0, sizeof(log));
2474 log.u_bbr.flex1 = rtt;
2475 log.u_bbr.flex2 = send_time;
2476 log.u_bbr.flex3 = ack_time;
2477 log.u_bbr.flex4 = where;
2478 log.u_bbr.flex7 = 2;
2479 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2480 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2481 &rack->rc_inp->inp_socket->so_rcv,
2482 &rack->rc_inp->inp_socket->so_snd,
2484 0, &log, false, &tv);
2491 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line)
2493 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
2494 union tcp_log_stackspecific log;
2495 struct timeval tv;
2496 
2497 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2498 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2499 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2500 log.u_bbr.flex1 = line;
2501 log.u_bbr.flex2 = tick;
2502 log.u_bbr.flex3 = tp->t_maxunacktime;
2503 log.u_bbr.flex4 = tp->t_acktime;
2504 log.u_bbr.flex8 = event;
2505 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2506 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2507 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2508 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2509 log.u_bbr.pacing_gain = rack->r_must_retran;
2510 TCP_LOG_EVENTP(tp, NULL,
2511 &rack->rc_inp->inp_socket->so_rcv,
2512 &rack->rc_inp->inp_socket->so_snd,
2513 BBR_LOG_PROGRESS, 0,
2514 0, &log, false, &tv);
2519 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv)
2521 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2522 union tcp_log_stackspecific log;
2523 
2524 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2525 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2526 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2527 log.u_bbr.flex1 = slot;
2528 if (rack->rack_no_prr)
2529 log.u_bbr.flex2 = 0;
2531 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt;
2532 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags);
2533 log.u_bbr.flex8 = rack->rc_in_persist;
2534 log.u_bbr.timeStamp = cts;
2535 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2536 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2537 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2538 log.u_bbr.pacing_gain = rack->r_must_retran;
2539 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2540 &rack->rc_inp->inp_socket->so_rcv,
2541 &rack->rc_inp->inp_socket->so_snd,
2543 0, &log, false, tv);
2548 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs)
2550 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2551 union tcp_log_stackspecific log;
2552 struct timeval tv;
2553 
2554 memset(&log, 0, sizeof(log));
2555 log.u_bbr.flex1 = did_out;
2556 log.u_bbr.flex2 = nxt_pkt;
2557 log.u_bbr.flex3 = way_out;
2558 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
2559 if (rack->rack_no_prr)
2560 log.u_bbr.flex5 = 0;
2562 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2563 log.u_bbr.flex6 = nsegs;
2564 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs;
2565 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */
2566 log.u_bbr.flex7 <<= 1;
2567 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */
2568 log.u_bbr.flex7 <<= 1;
2569 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */
2570 log.u_bbr.flex8 = rack->rc_in_persist;
2571 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2572 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2573 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2574 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
2575 log.u_bbr.use_lt_bw <<= 1;
2576 log.u_bbr.use_lt_bw |= rack->r_might_revert;
2577 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2578 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2579 log.u_bbr.pacing_gain = rack->r_must_retran;
2580 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2581 &rack->rc_inp->inp_socket->so_rcv,
2582 &rack->rc_inp->inp_socket->so_snd,
2583 BBR_LOG_DOSEG_DONE, 0,
2584 0, &log, false, &tv);
2589 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm)
2591 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
2592 union tcp_log_stackspecific log;
2593 struct timeval tv;
2594 uint32_t cts;
2595 
2596 memset(&log, 0, sizeof(log));
2597 cts = tcp_get_usecs(&tv);
2598 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs;
2599 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
2600 log.u_bbr.flex4 = arg1;
2601 log.u_bbr.flex5 = arg2;
2602 log.u_bbr.flex6 = arg3;
2603 log.u_bbr.flex8 = frm;
2604 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2605 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2606 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2607 log.u_bbr.applimited = rack->r_ctl.rc_sacked;
2608 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2609 log.u_bbr.pacing_gain = rack->r_must_retran;
2610 TCP_LOG_EVENTP(tp, NULL,
2611 &tp->t_inpcb->inp_socket->so_rcv,
2612 &tp->t_inpcb->inp_socket->so_snd,
2613 TCP_HDWR_PACE_SIZE, 0,
2614 0, &log, false, &tv);
2619 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot,
2620 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use)
2622 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2623 union tcp_log_stackspecific log;
2624 struct timeval tv;
2625 
2626 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2627 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2628 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2629 log.u_bbr.flex1 = slot;
2630 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
2631 log.u_bbr.flex4 = reason;
2632 if (rack->rack_no_prr)
2633 log.u_bbr.flex5 = 0;
2635 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2636 log.u_bbr.flex7 = hpts_calling;
2637 log.u_bbr.flex8 = rack->rc_in_persist;
2638 log.u_bbr.lt_epoch = cwnd_to_use;
2639 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2640 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2641 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2642 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2643 log.u_bbr.pacing_gain = rack->r_must_retran;
2644 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2645 &rack->rc_inp->inp_socket->so_rcv,
2646 &rack->rc_inp->inp_socket->so_snd,
2648 tlen, &log, false, &tv);
2653 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts,
2654 struct timeval *tv, uint32_t flags_on_entry)
2656 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2657 union tcp_log_stackspecific log;
2658 
2659 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2660 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2661 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2662 log.u_bbr.flex1 = line;
2663 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to;
2664 log.u_bbr.flex3 = flags_on_entry;
2665 log.u_bbr.flex4 = us_cts;
2666 if (rack->rack_no_prr)
2667 log.u_bbr.flex5 = 0;
2669 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2670 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2671 log.u_bbr.flex7 = hpts_removed;
2672 log.u_bbr.flex8 = 1;
2673 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags;
2674 log.u_bbr.timeStamp = us_cts;
2675 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2676 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2677 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2678 log.u_bbr.pacing_gain = rack->r_must_retran;
2679 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2680 &rack->rc_inp->inp_socket->so_rcv,
2681 &rack->rc_inp->inp_socket->so_snd,
2682 BBR_LOG_TIMERCANC, 0,
2683 0, &log, false, tv);
2688 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
2689 uint32_t flex1, uint32_t flex2,
2690 uint32_t flex3, uint32_t flex4,
2691 uint32_t flex5, uint32_t flex6,
2692 uint16_t flex7, uint8_t mod)
2694 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2695 union tcp_log_stackspecific log;
2696 struct timeval tv;
2697 
2699 /* No you can't use 1, its for the real to cancel */
2700 if (mod == 1)
2701 return;
2702 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2703 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2704 log.u_bbr.flex1 = flex1;
2705 log.u_bbr.flex2 = flex2;
2706 log.u_bbr.flex3 = flex3;
2707 log.u_bbr.flex4 = flex4;
2708 log.u_bbr.flex5 = flex5;
2709 log.u_bbr.flex6 = flex6;
2710 log.u_bbr.flex7 = flex7;
2711 log.u_bbr.flex8 = mod;
2712 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2713 &rack->rc_inp->inp_socket->so_rcv,
2714 &rack->rc_inp->inp_socket->so_snd,
2715 BBR_LOG_TIMERCANC, 0,
2716 0, &log, false, &tv);
2721 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers)
2723 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2724 union tcp_log_stackspecific log;
2725 struct timeval tv;
2726 
2727 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2728 log.u_bbr.flex1 = timers;
2729 log.u_bbr.flex2 = ret;
2730 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp;
2731 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
2732 log.u_bbr.flex5 = cts;
2733 if (rack->rack_no_prr)
2734 log.u_bbr.flex6 = 0;
2736 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt;
2737 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2738 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2739 log.u_bbr.pacing_gain = rack->r_must_retran;
2740 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2741 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2742 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2743 &rack->rc_inp->inp_socket->so_rcv,
2744 &rack->rc_inp->inp_socket->so_snd,
2745 BBR_LOG_TO_PROCESS, 0,
2746 0, &log, false, &tv);
2751 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd)
2753 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2754 union tcp_log_stackspecific log;
2755 struct timeval tv;
2756 
2757 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2758 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out;
2759 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs;
2760 if (rack->rack_no_prr)
2761 log.u_bbr.flex3 = 0;
2763 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt;
2764 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered;
2765 log.u_bbr.flex5 = rack->r_ctl.rc_sacked;
2766 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt;
2767 log.u_bbr.flex8 = frm;
2768 log.u_bbr.pkts_out = orig_cwnd;
2769 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2770 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2771 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
2772 log.u_bbr.use_lt_bw <<= 1;
2773 log.u_bbr.use_lt_bw |= rack->r_might_revert;
2774 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2775 &rack->rc_inp->inp_socket->so_rcv,
2776 &rack->rc_inp->inp_socket->so_snd,
2778 0, &log, false, &tv);
2782 #ifdef NETFLIX_EXP_DETECTION
2783 static void
2784 rack_log_sad(struct tcp_rack *rack, int event)
2785 {
2786 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2787 union tcp_log_stackspecific log;
2788 struct timeval tv;
2789 
2790 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2791 log.u_bbr.flex1 = rack->r_ctl.sack_count;
2792 log.u_bbr.flex2 = rack->r_ctl.ack_count;
2793 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra;
2794 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
2795 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced;
2796 log.u_bbr.flex6 = tcp_sack_to_ack_thresh;
2797 log.u_bbr.pkts_out = tcp_sack_to_move_thresh;
2798 log.u_bbr.lt_epoch = (tcp_force_detection << 8);
2799 log.u_bbr.lt_epoch |= rack->do_detection;
2800 log.u_bbr.applimited = tcp_map_minimum;
2801 log.u_bbr.flex7 = rack->sack_attack_disable;
2802 log.u_bbr.flex8 = event;
2803 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2804 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2805 log.u_bbr.delivered = tcp_sad_decay_val;
2806 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2807 &rack->rc_inp->inp_socket->so_rcv,
2808 &rack->rc_inp->inp_socket->so_snd,
2809 TCP_SAD_DETECTION, 0,
2810 0, &log, false, &tv);
2816 rack_counter_destroy(void)
2820 counter_u64_free(rack_fto_send);
2821 counter_u64_free(rack_fto_rsm_send);
2822 counter_u64_free(rack_nfto_resend);
2823 counter_u64_free(rack_hw_pace_init_fail);
2824 counter_u64_free(rack_hw_pace_lost);
2825 counter_u64_free(rack_non_fto_send);
2826 counter_u64_free(rack_extended_rfo);
2827 counter_u64_free(rack_ack_total);
2828 counter_u64_free(rack_express_sack);
2829 counter_u64_free(rack_sack_total);
2830 counter_u64_free(rack_move_none);
2831 counter_u64_free(rack_move_some);
2832 counter_u64_free(rack_sack_attacks_detected);
2833 counter_u64_free(rack_sack_attacks_reversed);
2834 counter_u64_free(rack_sack_used_next_merge);
2835 counter_u64_free(rack_sack_used_prev_merge);
2836 counter_u64_free(rack_badfr);
2837 counter_u64_free(rack_badfr_bytes);
2838 counter_u64_free(rack_rtm_prr_retran);
2839 counter_u64_free(rack_rtm_prr_newdata);
2840 counter_u64_free(rack_timestamp_mismatch);
2841 counter_u64_free(rack_find_high);
2842 counter_u64_free(rack_reorder_seen);
2843 counter_u64_free(rack_tlp_tot);
2844 counter_u64_free(rack_tlp_newdata);
2845 counter_u64_free(rack_tlp_retran);
2846 counter_u64_free(rack_tlp_retran_bytes);
2847 counter_u64_free(rack_tlp_retran_fail);
2848 counter_u64_free(rack_to_tot);
2849 counter_u64_free(rack_to_arm_rack);
2850 counter_u64_free(rack_to_arm_tlp);
2851 counter_u64_free(rack_calc_zero);
2852 counter_u64_free(rack_calc_nonzero);
2853 counter_u64_free(rack_paced_segments);
2854 counter_u64_free(rack_unpaced_segments);
2855 counter_u64_free(rack_saw_enobuf);
2856 counter_u64_free(rack_saw_enobuf_hw);
2857 counter_u64_free(rack_saw_enetunreach);
2858 counter_u64_free(rack_hot_alloc);
2859 counter_u64_free(rack_to_alloc);
2860 counter_u64_free(rack_to_alloc_hard);
2861 counter_u64_free(rack_to_alloc_emerg);
2862 counter_u64_free(rack_to_alloc_limited);
2863 counter_u64_free(rack_alloc_limited_conns);
2864 counter_u64_free(rack_split_limited);
2865 for (i = 0; i < MAX_NUM_OF_CNTS; i++) {
2866 counter_u64_free(rack_proc_comp_ack[i]);
2867 }
2868 counter_u64_free(rack_multi_single_eq);
2869 counter_u64_free(rack_proc_non_comp_ack);
2870 counter_u64_free(rack_sack_proc_all);
2871 counter_u64_free(rack_sack_proc_restart);
2872 counter_u64_free(rack_sack_proc_short);
2873 counter_u64_free(rack_enter_tlp_calc);
2874 counter_u64_free(rack_used_tlpmethod);
2875 counter_u64_free(rack_used_tlpmethod2);
2876 counter_u64_free(rack_sack_skipped_acked);
2877 counter_u64_free(rack_sack_splits);
2878 counter_u64_free(rack_progress_drops);
2879 counter_u64_free(rack_input_idle_reduces);
2880 counter_u64_free(rack_collapsed_win);
2881 counter_u64_free(rack_tlp_does_nada);
2882 counter_u64_free(rack_try_scwnd);
2883 counter_u64_free(rack_per_timer_hole);
2884 counter_u64_free(rack_large_ackcmp);
2885 counter_u64_free(rack_small_ackcmp);
2887 counter_u64_free(rack_adjust_map_bw);
2889 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE);
2890 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE);
2891 }
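/*
 * The allocator below works through three tiers: first the per-rack
 * free list (the "hot" cache), then the UMA zone, and finally the
 * last few entries held back on the free list for emergencies. Each
 * tier bumps its own counter, so the sysctl stats registered above
 * show where allocations are actually being satisfied.
 */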
2893 static struct rack_sendmap *
2894 rack_alloc(struct tcp_rack *rack)
2895 {
2896 struct rack_sendmap *rsm;
2897 
2898 /*
2899 * First get the top of the list; in
2900 * theory it is the "hottest" rsm we have,
2901 * possibly just freed by ack processing.
2902 */
2903 if (rack->rc_free_cnt > rack_free_cache) {
2904 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
2905 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
2906 counter_u64_add(rack_hot_alloc, 1);
2907 rack->rc_free_cnt--;
2911 * Once we get under our free cache we probably
2912 * no longer have a "hot" one available. Lets
2915 rsm = uma_zalloc(rack_zone, M_NOWAIT);
2917 rack->r_ctl.rc_num_maps_alloced++;
2918 counter_u64_add(rack_to_alloc, 1);
2919 return (rsm);
2920 }
2921 /*
2922 * Dig in to our aux rsm's (the last two) since
2923 * UMA failed to get us one.
2924 */
2925 if (rack->rc_free_cnt) {
2926 counter_u64_add(rack_to_alloc_emerg, 1);
2927 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
2928 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
2929 rack->rc_free_cnt--;
2935 static struct rack_sendmap *
2936 rack_alloc_full_limit(struct tcp_rack *rack)
2937 {
2938 if ((V_tcp_map_entries_limit > 0) &&
2939 (rack->do_detection == 0) &&
2940 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
2941 counter_u64_add(rack_to_alloc_limited, 1);
2942 if (!rack->alloc_limit_reported) {
2943 rack->alloc_limit_reported = 1;
2944 counter_u64_add(rack_alloc_limited_conns, 1);
2945 }
2946 return (NULL);
2947 }
2948 return (rack_alloc(rack));
2949 }
2950 
2951 /* wrapper to allocate a sendmap entry, subject to a specific limit */
2952 static struct rack_sendmap *
2953 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type)
2954 {
2955 struct rack_sendmap *rsm;
2956 
2957 if (limit_type) {
2958 /* currently there is only one limit type */
2959 if (V_tcp_map_split_limit > 0 &&
2960 (rack->do_detection == 0) &&
2961 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) {
2962 counter_u64_add(rack_split_limited, 1);
2963 if (!rack->alloc_limit_reported) {
2964 rack->alloc_limit_reported = 1;
2965 counter_u64_add(rack_alloc_limited_conns, 1);
2966 }
2967 return (NULL);
2968 }
2969 }
2970 
2971 /* allocate and mark in the limit type, if set */
2972 rsm = rack_alloc(rack);
2973 if (rsm != NULL && limit_type) {
2974 rsm->r_limit_type = limit_type;
2975 rack->r_ctl.rc_num_split_allocs++;
2981 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
2983 if (rsm->r_flags & RACK_APP_LIMITED) {
2984 if (rack->r_ctl.rc_app_limited_cnt > 0) {
2985 rack->r_ctl.rc_app_limited_cnt--;
2988 if (rsm->r_limit_type) {
2989 /* currently there is only one limit type */
2990 rack->r_ctl.rc_num_split_allocs--;
2992 if (rsm == rack->r_ctl.rc_first_appl) {
2993 if (rack->r_ctl.rc_app_limited_cnt == 0)
2994 rack->r_ctl.rc_first_appl = NULL;
2996 /* Follow the next one out */
2997 struct rack_sendmap fe;
2999 fe.r_start = rsm->r_nseq_appl;
3000 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
3003 if (rsm == rack->r_ctl.rc_resend)
3004 rack->r_ctl.rc_resend = NULL;
3005 if (rsm == rack->r_ctl.rc_rsm_at_retran)
3006 rack->r_ctl.rc_rsm_at_retran = NULL;
3007 if (rsm == rack->r_ctl.rc_end_appl)
3008 rack->r_ctl.rc_end_appl = NULL;
3009 if (rack->r_ctl.rc_tlpsend == rsm)
3010 rack->r_ctl.rc_tlpsend = NULL;
3011 if (rack->r_ctl.rc_sacklast == rsm)
3012 rack->r_ctl.rc_sacklast = NULL;
3013 memset(rsm, 0, sizeof(struct rack_sendmap));
3014 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext);
3015 rack->rc_free_cnt++;
3019 rack_free_trim(struct tcp_rack *rack)
3021 struct rack_sendmap *rsm;
3022 
3023 /*
3024 * Free up all the tail entries until
3025 * we get our list down to the limit.
3026 */
3027 while (rack->rc_free_cnt > rack_free_cache) {
3028 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head);
3029 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
3030 rack->rc_free_cnt--;
3031 uma_zfree(rack_zone, rsm);
3032 }
3033 }
3036 static uint32_t
3037 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack)
3038 {
3039 uint64_t srtt, bw, len, tim;
3040 uint32_t segsiz, def_len, minl;
3042 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
3043 def_len = rack_def_data_window * segsiz;
3044 if (rack->rc_gp_filled == 0) {
3045 /*
3046 * We have no measurement (IW is in flight?) so
3047 * we can only guess using our data_window sysctl
3048 * value (usually 100MSS).
3049 */
3050 return (def_len);
3051 }
3052 /*
3053 * Now we have a number of factors to consider.
3055 * 1) We have a desired BDP which is usually
3057 * 2) We have a minimum number of rtt's usually 1 SRTT
3058 * but we allow it too to be more.
3059 * 3) We want to make sure a measurement last N useconds (if
3060 * we have set rack_min_measure_usec.
3062 * We handle the first concern here by trying to create a data
3063 * window of max(rack_def_data_window, DesiredBDP). The
3064 * second concern we handle in not letting the measurement
3065 * window end normally until at least the required SRTT's
3066 * have gone by which is done further below in
3067 * rack_enough_for_measurement(). Finally the third concern
3068 * we also handle here by calculating how long that time
3069 * would take at the current BW and then return the
3070 * max of our first calculation and that length. Note
3071 * that if rack_min_measure_usec is 0, we don't deal
3072 * with concern 3. Also for both Concern 1 and 3 an
3073 * application limited period could end the measurement
3076 * So lets calculate the BDP with the "known" b/w using
3077 * the SRTT as our rtt and then multiply it by the
3078 * goal.
3079 */
3080 bw = rack_get_bw(rack);
3081 srtt = (uint64_t)tp->t_srtt;
3083 len /= (uint64_t)HPTS_USEC_IN_SEC;
3084 len *= max(1, rack_goal_bdp);
3085 /* Now we need to round up to the nearest MSS */
3086 len = roundup(len, segsiz);
3087 if (rack_min_measure_usec) {
3088 /* Now calculate our min length for this b/w */
3089 tim = rack_min_measure_usec;
3090 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC;
3091 if (minl == 0)
3092 minl = 1;
3093 minl = roundup(minl, segsiz);
3094 if (len < minl)
3095 len = minl;
3096 }
3097 /*
3098 * Now if we have a very small window we want
3099 * to attempt to get the window that is
3100 * as small as possible. This happens on
3101 * low b/w connections and we don't want to
3102 * span huge numbers of rtt's between measurements.
3104 * We basically include 2 over our "MIN window" so
3105 * that the measurement can be shortened (possibly) by
3106 * an ack'ed packet.
3107 */
3108 if (len < def_len)
3109 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz)));
3110 else
3111 return (max((uint32_t)len, def_len));
3112 }
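/*
 * A worked example of the BDP math (illustrative numbers): at a
 * measured b/w of 1250000 bytes/sec and a 40000 usec SRTT the BDP is
 * 1250000 * 40000 / 1000000 = 50000 bytes; with rack_goal_bdp at 2
 * the desired window becomes 100000 bytes, rounded up to a multiple
 * of the segment size before the floors above are applied.
 */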
3116 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack)
3118 uint32_t tim, srtts, segsiz;
3121 * Has enough time passed for the GP measurement to be valid?
3123 if ((tp->snd_max == tp->snd_una) ||
3124 (th_ack == tp->snd_max)){
3128 if (SEQ_LT(th_ack, tp->gput_seq)) {
3129 /* Not enough bytes yet */
3132 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
3133 if (SEQ_LT(th_ack, tp->gput_ack) &&
3134 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
3135 /* Not enough bytes yet */
3138 if (rack->r_ctl.rc_first_appl &&
3139 (rack->r_ctl.rc_first_appl->r_start == th_ack)) {
3141 * We are up to the app limited point;
3142 * we have to measure irrespective of the time.
3146 /* Now what about time? */
3147 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts);
3148 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts;
3152 /* Nope not even a full SRTT has passed */
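/*
 * Example of the time gate above (assumed values): with
 * rc_gp_srtt = 30,000 usec and rack_min_srtts = 1, the measurement
 * cannot end normally until at least 30,000 usec of ack time has
 * passed since gput_ts, unless the app-limited point is reached
 * first.
 */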
3157 rack_log_timely(struct tcp_rack *rack,
3158 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd,
3159 uint64_t up_bnd, int line, uint8_t method)
3161 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
3162 union tcp_log_stackspecific log;
3165 memset(&log, 0, sizeof(log));
3166 log.u_bbr.flex1 = logged;
3167 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt;
3168 log.u_bbr.flex2 <<= 4;
3169 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt;
3170 log.u_bbr.flex2 <<= 4;
3171 log.u_bbr.flex2 |= rack->rc_gp_incr;
3172 log.u_bbr.flex2 <<= 4;
3173 log.u_bbr.flex2 |= rack->rc_gp_bwred;
3174 log.u_bbr.flex3 = rack->rc_gp_incr;
3175 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
3176 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca;
3177 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec;
3178 log.u_bbr.flex7 = rack->rc_gp_bwred;
3179 log.u_bbr.flex8 = method;
3180 log.u_bbr.cur_del_rate = cur_bw;
3181 log.u_bbr.delRate = low_bnd;
3182 log.u_bbr.bw_inuse = up_bnd;
3183 log.u_bbr.rttProp = rack_get_bw(rack);
3184 log.u_bbr.pkt_epoch = line;
3185 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
3186 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3187 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3188 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
3189 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
3190 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom;
3191 log.u_bbr.cwnd_gain <<= 1;
3192 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec;
3193 log.u_bbr.cwnd_gain <<= 1;
3194 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
3195 log.u_bbr.cwnd_gain <<= 1;
3196 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
3197 log.u_bbr.lost = rack->r_ctl.rc_loss_count;
3198 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3199 &rack->rc_inp->inp_socket->so_rcv,
3200 &rack->rc_inp->inp_socket->so_snd,
3202 0, &log, false, &tv);
3207 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult)
3210 * Before we increase we need to know if
3211 * the estimate just made was less than
3212 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est)
3214 * If we already are pacing at a fast enough
3215 * rate to push us faster there is no sense in
3218 * We first calculate our actual pacing rate (ss or ca multiplier
3219 * times our cur_bw).
3221 * Then we take the last measured rate and multiply by our
3222 * maximum pacing overage to give us a max allowable rate.
3224 * If our act_rate is smaller than our max_allowable rate
3225 * then we should increase. Else we should hold steady.
3228 uint64_t act_rate, max_allow_rate;
3230 if (rack_timely_no_stopping)
3233 if ((cur_bw == 0) || (last_bw_est == 0)) {
3235 * Initial startup case or
3236 * everything is acked case.
3238 rack_log_timely(rack, mult, cur_bw, 0, 0,
3244 * We can always pace at or slightly above our rate.
3246 rack_log_timely(rack, mult, cur_bw, 0, 0,
3250 act_rate = cur_bw * (uint64_t)mult;
3252 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100);
3253 max_allow_rate /= 100;
3254 if (act_rate < max_allow_rate) {
3256 * Here the rate we are actually pacing at
3257 * is smaller than 10% above our last measurement.
3258 * This means we are pacing below what we would
3259 * like to try to achieve (plus some wiggle room).
3261 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
3266 * Here we are already pacing at least rack_max_per_above(10%)
3267 * what we are getting back. This indicates most likely
3268 * that we are being limited (cwnd/rwnd/app) and can't
3269 * get any more b/w. There is no sense in trying to
3270 * raise the pacing rate; it's not speeding us up
3271 * and we already are pacing faster than we are getting.
3273 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
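/*
 * Worked example for rack_bw_can_be_raised() (assumed values): with
 * cur_bw = 1,000,000 bytes/sec, mult = 250 (a 250% multiplier) and
 * rack_max_per_above = 10:
 *   act_rate       = (1000000 * 250) / 100 = 2,500,000 bytes/sec
 *   max_allow_rate = (last_bw_est * (10 + 100)) / 100
 * so a last_bw_est of 2,400,000 allows the raise (cap 2,640,000),
 * while a last_bw_est of 2,000,000 does not (cap 2,200,000).
 */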
3280 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack)
3283 * When we drag bottom, we want to ensure
3284 * that no multiplier is below 1.0; if one is,
3285 * we want to restore it to at least that.
3287 if (rack->r_ctl.rack_per_of_gp_rec < 100) {
3288 /* This is unlikely; we usually do not touch recovery */
3289 rack->r_ctl.rack_per_of_gp_rec = 100;
3291 if (rack->r_ctl.rack_per_of_gp_ca < 100) {
3292 rack->r_ctl.rack_per_of_gp_ca = 100;
3294 if (rack->r_ctl.rack_per_of_gp_ss < 100) {
3295 rack->r_ctl.rack_per_of_gp_ss = 100;
3300 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack)
3302 if (rack->r_ctl.rack_per_of_gp_ca > 100) {
3303 rack->r_ctl.rack_per_of_gp_ca = 100;
3305 if (rack->r_ctl.rack_per_of_gp_ss > 100) {
3306 rack->r_ctl.rack_per_of_gp_ss = 100;
3311 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override)
3313 int32_t calc, logged, plus;
3319 * override is passed when we are
3320 * loosing b/w and making one last
3321 * gasp at trying to not loose out
3322 * to a new-reno flow.
3326 /* In classic timely we boost by 5x if we have 5 increases in a row, let's not */
3327 if (rack->rc_gp_incr &&
3328 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) {
3330 * Reset and get 5 strokes more before the boost. Note
3331 * that the count is 0 based so we have to add one.
3334 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST;
3335 rack->rc_gp_timely_inc_cnt = 0;
3337 plus = (uint32_t)rack_gp_increase_per;
3338 /* Must be at least 1% increase for true timely increases */
3340 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0)))
3342 if (rack->rc_gp_saw_rec &&
3343 (rack->rc_gp_no_rec_chg == 0) &&
3344 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3345 rack->r_ctl.rack_per_of_gp_rec)) {
3346 /* We have been in recovery ding it too */
3347 calc = rack->r_ctl.rack_per_of_gp_rec + plus;
3351 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc;
3352 if (rack_per_upper_bound_ss &&
3353 (rack->rc_dragged_bottom == 0) &&
3354 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss))
3355 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss;
3357 if (rack->rc_gp_saw_ca &&
3358 (rack->rc_gp_saw_ss == 0) &&
3359 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3360 rack->r_ctl.rack_per_of_gp_ca)) {
3362 calc = rack->r_ctl.rack_per_of_gp_ca + plus;
3366 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc;
3367 if (rack_per_upper_bound_ca &&
3368 (rack->rc_dragged_bottom == 0) &&
3369 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca))
3370 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca;
3372 if (rack->rc_gp_saw_ss &&
3373 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3374 rack->r_ctl.rack_per_of_gp_ss)) {
3376 calc = rack->r_ctl.rack_per_of_gp_ss + plus;
3379 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc;
3380 if (rack_per_upper_bound_ss &&
3381 (rack->rc_dragged_bottom == 0) &&
3382 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss))
3383 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss;
3387 (rack->rc_gp_incr == 0)){
3388 /* Go into increment mode */
3389 rack->rc_gp_incr = 1;
3390 rack->rc_gp_timely_inc_cnt = 0;
3392 if (rack->rc_gp_incr &&
3394 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) {
3395 rack->rc_gp_timely_inc_cnt++;
3397 rack_log_timely(rack, logged, plus, 0, 0,
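/*
 * Example of the increase step above (assuming rack_gp_increase_per
 * is 2): each timely increase adds 2 percentage points to the
 * active multiplier, and once rc_gp_timely_inc_cnt + 1 reaches
 * RACK_TIMELY_CNT_BOOST consecutive increases, a single boosted
 * step of 2 * RACK_TIMELY_CNT_BOOST points is applied and the
 * counter resets.
 */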
3402 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff)
3405 * norm_grad = rtt_diff / minrtt;
3406 * new_per = curper * (1 - B * norm_grad)
3408 * B = rack_gp_decrease_per (default 10%)
3409 * rtt_diff = input var current rtt-diff
3410 * curper = input var current percentage
3411 * minrtt = from rack filter
3416 perf = (((uint64_t)curper * ((uint64_t)1000000 -
3417 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 *
3418 (((uint64_t)rtt_diff * (uint64_t)1000000)/
3419 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/
3420 (uint64_t)1000000)) /
3422 if (perf > curper) {
3426 return ((uint32_t)perf);
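/*
 * Worked example of the fixed-point math above (assumed values):
 * with curper = 200, rtt_diff = 5,000 usec, a filtered min_rtt of
 * 25,000 usec and rack_gp_decrease_per = 10:
 *   norm_grad = 5000 / 25000 = 0.2
 *   new_per   = 200 * (1 - 0.10 * 0.2) = 196
 * i.e. the multiplier is trimmed from 200% to 196%.
 */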
3430 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt)
3434 * result = curper * (1 - B * (1 - highrttthresh / rtt))
3437 * B = rack_gp_decrease_per (default 10%)
3438 * highrttthresh = filter_min * rack_gp_rtt_maxmul
3441 uint32_t highrttthresh;
3443 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
3445 perf = (((uint64_t)curper * ((uint64_t)1000000 -
3446 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 -
3447 ((uint64_t)highrttthresh * (uint64_t)1000000) /
3448 (uint64_t)rtt)) / 100)) /(uint64_t)1000000);
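/*
 * Worked example (assumed values): a filtered min_rtt of 25,000
 * usec with rack_gp_rtt_maxmul = 3 gives highrttthresh = 75,000
 * usec. With rtt = 150,000 usec and curper = 150:
 *   perf = 150 * (1 - 0.10 * (1 - 75000/150000)) = 150 * 0.95 = 142
 * so the further the rtt sits above the threshold, the larger the
 * cut.
 */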
3453 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff)
3455 uint64_t logvar, logvar2, logvar3;
3456 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val;
3458 if (rack->rc_gp_incr) {
3459 /* Turn off increment counting */
3460 rack->rc_gp_incr = 0;
3461 rack->rc_gp_timely_inc_cnt = 0;
3463 ss_red = ca_red = rec_red = 0;
3465 /* Calculate the reduction value */
3469 /* Must be at least 1% reduction */
3470 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) {
3471 /* We have been in recovery ding it too */
3472 if (timely_says == 2) {
3473 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt);
3474 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
3480 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
3481 if (rack->r_ctl.rack_per_of_gp_rec > val) {
3482 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val);
3483 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val;
3485 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
3488 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec)
3489 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
3492 if (rack->rc_gp_saw_ss) {
3494 if (timely_says == 2) {
3495 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt);
3496 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
3502 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
3503 if (rack->r_ctl.rack_per_of_gp_ss > val) {
3504 ss_red = rack->r_ctl.rack_per_of_gp_ss - val;
3505 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val;
3508 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
3512 logvar2 = (uint32_t)rtt;
3514 logvar2 |= (uint32_t)rtt_diff;
3515 logvar3 = rack_gp_rtt_maxmul;
3517 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3518 rack_log_timely(rack, timely_says,
3520 logvar, __LINE__, 10);
3522 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss)
3523 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
3525 } else if (rack->rc_gp_saw_ca) {
3527 if (timely_says == 2) {
3528 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt);
3529 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
3535 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
3536 if (rack->r_ctl.rack_per_of_gp_ca > val) {
3537 ca_red = rack->r_ctl.rack_per_of_gp_ca - val;
3538 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val;
3540 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
3545 logvar2 = (uint32_t)rtt;
3547 logvar2 |= (uint32_t)rtt_diff;
3548 logvar3 = rack_gp_rtt_maxmul;
3550 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3551 rack_log_timely(rack, timely_says,
3553 logvar, __LINE__, 10);
3555 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca)
3556 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
3559 if (rack->rc_gp_timely_dec_cnt < 0x7) {
3560 rack->rc_gp_timely_dec_cnt++;
3561 if (rack_timely_dec_clear &&
3562 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear))
3563 rack->rc_gp_timely_dec_cnt = 0;
3568 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar,
3573 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts,
3574 uint32_t rtt, uint32_t line, uint8_t reas)
3576 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
3577 union tcp_log_stackspecific log;
3580 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3581 log.u_bbr.flex1 = line;
3582 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts;
3583 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts;
3584 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
3585 log.u_bbr.flex5 = rtt;
3586 log.u_bbr.flex6 = rack->rc_highly_buffered;
3587 log.u_bbr.flex6 <<= 1;
3588 log.u_bbr.flex6 |= rack->forced_ack;
3589 log.u_bbr.flex6 <<= 1;
3590 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul;
3591 log.u_bbr.flex6 <<= 1;
3592 log.u_bbr.flex6 |= rack->in_probe_rtt;
3593 log.u_bbr.flex6 <<= 1;
3594 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt;
3595 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt;
3596 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca;
3597 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec;
3598 log.u_bbr.flex8 = reas;
3599 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3600 log.u_bbr.delRate = rack_get_bw(rack);
3601 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt;
3602 log.u_bbr.cur_del_rate <<= 32;
3603 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt;
3604 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered;
3605 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
3606 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3607 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
3608 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
3609 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts;
3610 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight;
3611 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3612 log.u_bbr.rttProp = us_cts;
3613 log.u_bbr.rttProp <<= 32;
3614 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt;
3615 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3616 &rack->rc_inp->inp_socket->so_rcv,
3617 &rack->rc_inp->inp_socket->so_snd,
3618 BBR_LOG_RTT_SHRINKS, 0,
3619 0, &log, false, &rack->r_ctl.act_rcv_time);
3624 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt)
3628 bwdp = rack_get_bw(rack);
3629 bwdp *= (uint64_t)rtt;
3630 bwdp /= (uint64_t)HPTS_USEC_IN_SEC;
3631 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz);
3632 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) {
3634 * A window protocol must be able to have 4 packets
3635 * outstanding as the floor in order to function
3636 * (especially considering delayed ack :D).
3638 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs);
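/*
 * Worked example for rack_set_prtt_target() (assumed values): with
 * bw = 2,500,000 bytes/sec, rtt = 20,000 usec and segsiz = 1448:
 *   bwdp = (2500000 * 20000) / 1000000 = 50,000 bytes
 *   roundup(50000, 1448)               = 50,680 bytes (35 MSS)
 * floored at segsiz * rack_timely_min_segs as above.
 */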
3643 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts)
3646 * ProbeRTT is a bit different in rack_pacing than in
3647 * BBR. It is like BBR in that it uses the lowering of
3648 * the RTT as a signal that we saw something new and
3649 * counts from there for how long between. But it is
3650 * different in that it is quite simple. It does not
3651 * play with the cwnd and wait until we get down
3652 * to N segments outstanding and hold that for
3653 * 200ms. Instead it just sets the pacing reduction
3654 * rate to a set percentage (70 by default) and holds
3655 * that for a number of recent GP Srtt's.
3659 if (rack->rc_gp_dyn_mul == 0)
3662 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) {
3666 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
3667 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
3669 * Stop the goodput measurement now; the idea here is
3670 * that future measurements with in_probe_rtt
3671 * won't register unless they are greater, so
3672 * we want to get what info (if any) is available
3675 rack_do_goodput_measurement(rack->rc_tp, rack,
3676 rack->rc_tp->snd_una, __LINE__);
3678 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
3679 rack->r_ctl.rc_time_probertt_entered = us_cts;
3680 segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
3681 rack->r_ctl.rc_pace_min_segs);
3682 rack->in_probe_rtt = 1;
3683 rack->measure_saw_probe_rtt = 1;
3684 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
3685 rack->r_ctl.rc_time_probertt_starts = 0;
3686 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt;
3687 if (rack_probertt_use_min_rtt_entry)
3688 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
3690 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt);
3691 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3692 __LINE__, RACK_RTTS_ENTERPROBE);
3696 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts)
3698 struct rack_sendmap *rsm;
3701 segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
3702 rack->r_ctl.rc_pace_min_segs);
3703 rack->in_probe_rtt = 0;
3704 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
3705 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
3707 * Stop the goodput measurement now; the idea here is
3708 * that future measurements with in_probe_rtt
3709 * won't register unless they are greater, so
3710 * we want to get what info (if any) is available
3713 rack_do_goodput_measurement(rack->rc_tp, rack,
3714 rack->rc_tp->snd_una, __LINE__);
3715 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
3717 * We don't have enough data to make a measurement.
3718 * So let's just stop and start here after exiting
3719 * probe-rtt. We probably are not interested in
3720 * the results anyway.
3722 rack->rc_tp->t_flags &= ~TF_GPUTINPROG;
3725 * Measurements through the current snd_max are going
3726 * to be limited by the slower pacing rate.
3728 * We need to mark these as app-limited so we
3729 * don't collapse the b/w.
3731 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
3732 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
3733 if (rack->r_ctl.rc_app_limited_cnt == 0)
3734 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
3737 * Go out to the end of the app-limited chain, mark
3738 * this new one as next, and move the end_appl up
3741 if (rack->r_ctl.rc_end_appl)
3742 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
3743 rack->r_ctl.rc_end_appl = rsm;
3745 rsm->r_flags |= RACK_APP_LIMITED;
3746 rack->r_ctl.rc_app_limited_cnt++;
3749 * Now, we need to examine our pacing rate multipliers.
3750 * If it's under 100%, we need to kick it back up to
3751 * 100%. We also don't let it be over our "max" above
3752 * the actual rate i.e. 100% + rack_clamp_atexit_prtt.
3753 * Note setting clamp_atexit_prtt to 0 has the effect
3754 * of setting CA/SS to 100% always at exit (which is
3755 * the default behavior).
3757 if (rack_probertt_clear_is) {
3758 rack->rc_gp_incr = 0;
3759 rack->rc_gp_bwred = 0;
3760 rack->rc_gp_timely_inc_cnt = 0;
3761 rack->rc_gp_timely_dec_cnt = 0;
3763 /* Do we do any clamping at exit? */
3764 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) {
3765 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp;
3766 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp;
3768 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) {
3769 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt;
3770 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt;
3773 * Let's set rtt_diff to 0, so that we will get a "boost"
3776 rack->r_ctl.rc_rtt_diff = 0;
3778 /* Clear all flags so we start fresh */
3779 rack->rc_tp->t_bytes_acked = 0;
3780 rack->rc_tp->ccv->flags &= ~CCF_ABC_SENTAWND;
3782 * If configured to, set the cwnd and ssthresh to
3785 if (rack_probe_rtt_sets_cwnd) {
3789 /* Set ssthresh so we get into CA once we hit our target */
3790 if (rack_probertt_use_min_rtt_exit == 1) {
3791 /* Set to min rtt */
3792 rack_set_prtt_target(rack, segsiz,
3793 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
3794 } else if (rack_probertt_use_min_rtt_exit == 2) {
3795 /* Set to current gp rtt */
3796 rack_set_prtt_target(rack, segsiz,
3797 rack->r_ctl.rc_gp_srtt);
3798 } else if (rack_probertt_use_min_rtt_exit == 3) {
3799 /* Set to entry gp rtt */
3800 rack_set_prtt_target(rack, segsiz,
3801 rack->r_ctl.rc_entry_gp_rtt);
3806 sum = rack->r_ctl.rc_entry_gp_rtt;
3808 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt));
3811 * A highly buffered path needs
3812 * cwnd space for timely to work.
3813 * Let's set things up as if
3814 * we are heading back here again.
3816 setval = rack->r_ctl.rc_entry_gp_rtt;
3817 } else if (sum >= 15) {
3819 * Let's take the smaller of the
3820 * two since we are just somewhat
3823 setval = rack->r_ctl.rc_gp_srtt;
3824 if (setval > rack->r_ctl.rc_entry_gp_rtt)
3825 setval = rack->r_ctl.rc_entry_gp_rtt;
3828 * Here we are not highly buffered
3829 * and should pick the min we can to
3830 * keep from causing loss.
3832 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3834 rack_set_prtt_target(rack, segsiz,
3837 if (rack_probe_rtt_sets_cwnd > 1) {
3838 /* There is a percentage here to boost */
3839 ebdp = rack->r_ctl.rc_target_probertt_flight;
3840 ebdp *= rack_probe_rtt_sets_cwnd;
3842 setto = rack->r_ctl.rc_target_probertt_flight + ebdp;
3844 setto = rack->r_ctl.rc_target_probertt_flight;
3845 rack->rc_tp->snd_cwnd = roundup(setto, segsiz);
3846 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) {
3848 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs;
3850 /* If we set the cwnd, also set the ssthresh point so we are in CA */
3851 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1);
3853 rack_log_rtt_shrinks(rack, us_cts,
3854 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3855 __LINE__, RACK_RTTS_EXITPROBE);
3856 /* Clear times last so log has all the info */
3857 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max;
3858 rack->r_ctl.rc_time_probertt_entered = us_cts;
3859 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
3860 rack->r_ctl.rc_time_of_last_probertt = us_cts;
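/*
 * Example of the exit-target selection above (assumed values, and
 * assuming "sum" ends up as the entry/current rtt ratio scaled by
 * 10): rc_entry_gp_rtt = 60,000 usec against rc_gp_srtt = 35,000
 * usec gives sum = 17, which lands in the middle ("somewhat
 * buffered") branch and picks the smaller of the two rtts
 * (35,000 usec) as the target rtt.
 */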
3864 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts)
3866 /* Check in on probe-rtt */
3867 if (rack->rc_gp_filled == 0) {
3868 /* We do not do p-rtt unless we have gp measurements */
3871 if (rack->in_probe_rtt) {
3872 uint64_t no_overflow;
3873 uint32_t endtime, must_stay;
3875 if (rack->r_ctl.rc_went_idle_time &&
3876 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) {
3878 * We went idle during prtt, just exit now.
3880 rack_exit_probertt(rack, us_cts);
3881 } else if (rack_probe_rtt_safety_val &&
3882 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) &&
3883 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) {
3885 * Probe RTT safety value triggered!
3887 rack_log_rtt_shrinks(rack, us_cts,
3888 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3889 __LINE__, RACK_RTTS_SAFETY);
3890 rack_exit_probertt(rack, us_cts);
3892 /* Calculate the max we will wait */
3893 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait);
3894 if (rack->rc_highly_buffered)
3895 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp);
3896 /* Calculate the min we must wait */
3897 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain);
3898 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) &&
3899 TSTMP_LT(us_cts, endtime)) {
3901 /* Do we lower more? */
3903 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered))
3904 calc = us_cts - rack->r_ctl.rc_time_probertt_entered;
3907 calc /= max(rack->r_ctl.rc_gp_srtt, 1);
3910 calc *= rack_per_of_gp_probertt_reduce;
3911 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc;
3913 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh)
3914 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh;
3916 /* We must reach target or the time set */
3919 if (rack->r_ctl.rc_time_probertt_starts == 0) {
3920 if ((TSTMP_LT(us_cts, must_stay) &&
3921 rack->rc_highly_buffered) ||
3922 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) >
3923 rack->r_ctl.rc_target_probertt_flight)) {
3924 /* We are not past the must_stay time */
3927 rack_log_rtt_shrinks(rack, us_cts,
3928 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3929 __LINE__, RACK_RTTS_REACHTARGET);
3930 rack->r_ctl.rc_time_probertt_starts = us_cts;
3931 if (rack->r_ctl.rc_time_probertt_starts == 0)
3932 rack->r_ctl.rc_time_probertt_starts = 1;
3933 /* Restore back to the rate we want to pace at in prtt */
3934 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
3937 * Setup our end time, some number of gp_srtts plus 200ms.
3939 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt *
3940 (uint64_t)rack_probertt_gpsrtt_cnt_mul);
3941 if (rack_probertt_gpsrtt_cnt_div)
3942 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div);
3945 endtime += rack_min_probertt_hold;
3946 endtime += rack->r_ctl.rc_time_probertt_starts;
3947 if (TSTMP_GEQ(us_cts, endtime)) {
3948 /* yes, exit probertt */
3949 rack_exit_probertt(rack, us_cts);
3952 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) {
3953 /* Go into probertt, it's been too long since we went lower */
3954 rack_enter_probertt(rack, us_cts);
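/*
 * Example of the gradual reduction above (using the 70% default
 * noted in rack_enter_probertt() and assuming
 * rack_per_of_gp_probertt_reduce = 10): three full gp_srtt's after
 * entry, calc = 3, so the pacing percentage becomes 70 - 30 = 40,
 * subject to the rack_per_of_gp_lowthresh floor.
 */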
3959 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est,
3960 uint32_t rtt, int32_t rtt_diff)
3962 uint64_t cur_bw, up_bnd, low_bnd, subfr;
3965 if ((rack->rc_gp_dyn_mul == 0) ||
3966 (rack->use_fixed_rate) ||
3967 (rack->in_probe_rtt) ||
3968 (rack->rc_always_pace == 0)) {
3969 /* No dynamic GP multiplier in play */
3972 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start;
3973 cur_bw = rack_get_bw(rack);
3974 /* Calculate our up and down range */
3975 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up;
3977 up_bnd += rack->r_ctl.last_gp_comp_bw;
3979 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down;
3981 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr;
3982 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) {
3984 * This is the case where our RTT is above
3985 * the max target and we have been configured
3986 * to just do timely with no bonus-up stuff in that case.
3988 * There are two configurations: if set to 1, we
3989 * just do timely if we are over our max. If it's
3990 * set above 1 then we slam the multipliers down
3991 * to 100 and then decrement per timely.
3993 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
3995 if (rack->r_ctl.rc_no_push_at_mrtt > 1)
3996 rack_validate_multipliers_at_or_below_100(rack);
3997 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
3998 } else if ((last_bw_est < low_bnd) && !losses) {
4000 * We are decreasing; this is a bit complicated. It
4001 * means we are losing ground. This could be
4002 * because another flow entered and we are competing
4003 * for b/w with it. This will push the RTT up, which
4004 * makes timely unusable unless we want to get shoved
4005 * into a corner and just be backed off (the age
4006 * old problem with delay based CC).
4008 * On the other hand if it was a route change we
4009 * would like to stay somewhat contained and not
4010 * blow out the buffers.
4012 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4014 rack->r_ctl.last_gp_comp_bw = cur_bw;
4015 if (rack->rc_gp_bwred == 0) {
4016 /* Go into reduction counting */
4017 rack->rc_gp_bwred = 1;
4018 rack->rc_gp_timely_dec_cnt = 0;
4020 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) ||
4021 (timely_says == 0)) {
4023 * Push another time with faster pacing
4024 * to try to gain back (we include override to
4025 * get a full raise factor).
4027 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) ||
4028 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) ||
4029 (timely_says == 0) ||
4030 (rack_down_raise_thresh == 0)) {
4032 * Do an override up in b/w if we were
4033 * below the threshold, or if the threshold
4034 * is zero, in which case we always do the raise.
4036 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1);
4038 /* Log it stays the same */
4039 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0,
4042 rack->rc_gp_timely_dec_cnt++;
4043 /* We are not really incrementing; no-count */
4044 rack->rc_gp_incr = 0;
4045 rack->rc_gp_timely_inc_cnt = 0;
4048 * Let's just use the RTT
4049 * information and give up
4054 } else if ((timely_says != 2) &&
4056 (last_bw_est > up_bnd)) {
4058 * We are increasing b/w, let's keep going, updating
4059 * our b/w and ignoring any timely input, unless
4060 * of course we are at our max raise (if there is one).
4063 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4065 rack->r_ctl.last_gp_comp_bw = cur_bw;
4066 if (rack->rc_gp_saw_ss &&
4067 rack_per_upper_bound_ss &&
4068 (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) {
4070 * In cases where we can't go higher
4071 * we should just use timely.
4075 if (rack->rc_gp_saw_ca &&
4076 rack_per_upper_bound_ca &&
4077 (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) {
4079 * In cases where we can't go higher
4080 * we should just use timely.
4084 rack->rc_gp_bwred = 0;
4085 rack->rc_gp_timely_dec_cnt = 0;
4086 /* You get a set number of pushes if timely is trying to reduce */
4087 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) {
4088 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4090 /* Log it stays the same */
4091 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0,
4097 * We are staying between the lower and upper range bounds
4098 * so use timely to decide.
4100 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4104 rack->rc_gp_incr = 0;
4105 rack->rc_gp_timely_inc_cnt = 0;
4106 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) &&
4108 (last_bw_est < low_bnd)) {
4109 /* We are losing ground */
4110 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4111 rack->rc_gp_timely_dec_cnt++;
4112 /* We are not really incrementing; no-count */
4113 rack->rc_gp_incr = 0;
4114 rack->rc_gp_timely_inc_cnt = 0;
4116 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
4118 rack->rc_gp_bwred = 0;
4119 rack->rc_gp_timely_dec_cnt = 0;
4120 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4126 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt)
4128 int32_t timely_says;
4129 uint64_t log_mult, log_rtt_a_diff;
4131 log_rtt_a_diff = rtt;
4132 log_rtt_a_diff <<= 32;
4133 log_rtt_a_diff |= (uint32_t)rtt_diff;
4134 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) *
4135 rack_gp_rtt_maxmul)) {
4136 /* Reduce the b/w multiplier */
4138 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
4140 log_mult |= prev_rtt;
4141 rack_log_timely(rack, timely_says, log_mult,
4142 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4143 log_rtt_a_diff, __LINE__, 4);
4144 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
4145 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
4146 max(rack_gp_rtt_mindiv , 1)))) {
4147 /* Increase the b/w multiplier */
4148 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
4149 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
4150 max(rack_gp_rtt_mindiv , 1));
4152 log_mult |= prev_rtt;
4154 rack_log_timely(rack, timely_says, log_mult ,
4155 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4156 log_rtt_a_diff, __LINE__, 5);
4159 * Use a gradient to find the timely gradient
4161 * grad = rc_rtt_diff / min_rtt;
4163 * anything below or equal to 0 will be
4164 * an increase indication. Anything above
4165 * zero is a decrease. Note we take care
4166 * of the actual gradient calculation
4167 * in the reduction (it's not needed for
4170 log_mult = prev_rtt;
4171 if (rtt_diff <= 0) {
4173 * Rttdiff is at or below zero, increase the
4174 * b/w multiplier (it's 0 or negative)
4177 rack_log_timely(rack, timely_says, log_mult,
4178 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6);
4180 /* Reduce the b/w multiplier */
4182 rack_log_timely(rack, timely_says, log_mult,
4183 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7);
4186 return (timely_says);
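/*
 * Example of the rtt bands above (assumed values): with a filtered
 * min_rtt of 25,000 usec, rack_gp_rtt_maxmul = 3, rack_gp_rtt_minmul
 * = 1 and rack_gp_rtt_mindiv = 4, we signal a hard decrease once
 * rtt >= 75,000 usec, an increase while rtt <= 31,250 usec, and
 * fall back to the rtt_diff gradient in between.
 */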
4190 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
4191 tcp_seq th_ack, int line)
4193 uint64_t tim, bytes_ps, ltim, stim, utim;
4194 uint32_t segsiz, bytes, reqbytes, us_cts;
4195 int32_t gput, new_rtt_diff, timely_says;
4196 uint64_t resid_bw, subpart = 0, addpart = 0, srtt;
4199 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
4200 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
4201 if (TSTMP_GEQ(us_cts, tp->gput_ts))
4202 tim = us_cts - tp->gput_ts;
4206 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts)
4207 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts;
4211 * Use the larger of the send time or ack time. This prevents us
4212 * from being influenced by ack artifacts to come up with too
4213 * high a measurement. Note that since we are spanning over many more
4214 * bytes in most of our measurements hopefully that is less likely to
4220 utim = max(stim, 1);
4222 /* Let's get a msec time ltim too for the old stuff */
4222 ltim = max(1, (utim / HPTS_USEC_IN_MSEC));
4223 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim;
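/*
 * Units check for the legacy gput above (illustrative): 1,000,000
 * bytes acked over ltim = 100 msec gives (1000000 << 3) / 100 =
 * 80,000, i.e. kbit/s (80 Mbps here).
 */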
4224 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz));
4225 if ((tim == 0) && (stim == 0)) {
4227 * Invalid measurement time, maybe
4228 * all on one ack/one send?
4232 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4233 0, 0, 0, 10, __LINE__, NULL);
4234 goto skip_measurement;
4236 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) {
4237 /* We never made a us_rtt measurement? */
4240 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4241 0, 0, 0, 10, __LINE__, NULL);
4242 goto skip_measurement;
4245 * Calculate the maximum possible b/w this connection
4246 * could have. We base our calculation on the lowest
4247 * rtt we have seen during the measurement and the
4248 * largest rwnd the client has given us in that time. This
4249 * forms a BDP that is the maximum that we could ever
4250 * get to the client. Anything larger is not valid.
4252 * I originally had code here that rejected measurements
4253 * where the time was less than 1/2 the latest us_rtt.
4254 * But after thinking on that I realized its wrong since
4255 * say you had a 150Mbps or even 1Gbps link, and you
4256 * were a long way away.. example I am in Europe (100ms rtt)
4257 * talking to my 1Gbps link in S.C. Now measuring say 150,000
4258 * bytes my time would be 1.2ms, and yet my rtt would say
4259 * the measurement was invalid since the time was < 50ms. The
4260 * same thing is true for 150Mb (8ms of time).
4262 * A better way I realized is to look at what the maximum
4263 * the connection could possibly do. This is gated on
4264 * the lowest RTT we have seen and the highest rwnd.
4265 * We should in theory never exceed that, if we are
4266 * then something on the path is storing up packets
4267 * and then feeding them all at once to our endpoint
4268 * messing up our measurement.
4270 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd;
4271 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC;
4272 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt;
4273 if (SEQ_LT(th_ack, tp->gput_seq)) {
4274 /* No measurement can be made */
4277 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4278 0, 0, 0, 10, __LINE__, NULL);
4279 goto skip_measurement;
4281 bytes = (th_ack - tp->gput_seq);
4282 bytes_ps = (uint64_t)bytes;
4284 * Don't measure a b/w for pacing unless we have gotten at least
4285 * an initial windows worth of data in this measurement interval.
4287 * Small numbers of bytes get badly influenced by delayed ack and
4288 * other artifacts. Note we take the initial window or our
4289 * defined minimum GP (defaulting to 10 which hopefully is the
4292 if (rack->rc_gp_filled == 0) {
4294 * The initial estimate is special. We
4295 * have blasted out an IW worth of packets
4296 * without a real valid ack ts result. We
4297 * then setup the app_limited_needs_set flag,
4298 * this should get the first ack in (probably 2
4299 * MSS worth) to be recorded as the timestamp.
4300 * We thus allow a smaller number of bytes i.e.
4303 reqbytes -= (2 * segsiz);
4304 /* Also let's fill previous for our first measurement to be neutral */
4305 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
4307 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) {
4308 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4309 rack->r_ctl.rc_app_limited_cnt,
4310 0, 0, 10, __LINE__, NULL);
4311 goto skip_measurement;
4314 * We now need to calculate the Timely-like status so
4315 * we can update (possibly) the b/w multipliers.
4317 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt;
4318 if (rack->rc_gp_filled == 0) {
4319 /* No previous reading */
4320 rack->r_ctl.rc_rtt_diff = new_rtt_diff;
4322 if (rack->measure_saw_probe_rtt == 0) {
4324 * We don't want a probertt to be counted
4325 * since it will incorrectly be negative. We
4326 * expect to be reducing the RTT when we
4327 * pace at a slower rate.
4329 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8);
4330 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8);
4333 timely_says = rack_make_timely_judgement(rack,
4334 rack->r_ctl.rc_gp_srtt,
4335 rack->r_ctl.rc_rtt_diff,
4336 rack->r_ctl.rc_prev_gp_srtt
4338 bytes_ps *= HPTS_USEC_IN_SEC;
4340 if (bytes_ps > rack->r_ctl.last_max_bw) {
4342 * Something on the path is playing games,
4343 * since this b/w is not possible based
4344 * on our BDP (highest rwnd and lowest rtt
4345 * we saw in the measurement window).
4347 * Another option here would be to
4348 * instead skip the measurement.
4350 rack_log_pacing_delay_calc(rack, bytes, reqbytes,
4351 bytes_ps, rack->r_ctl.last_max_bw, 0,
4352 11, __LINE__, NULL);
4353 bytes_ps = rack->r_ctl.last_max_bw;
4355 /* We store gp for b/w in bytes per second */
4356 if (rack->rc_gp_filled == 0) {
4357 /* Initial measurement */
4359 rack->r_ctl.gp_bw = bytes_ps;
4360 rack->rc_gp_filled = 1;
4361 rack->r_ctl.num_measurements = 1;
4362 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
4364 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4365 rack->r_ctl.rc_app_limited_cnt,
4366 0, 0, 10, __LINE__, NULL);
4368 if (rack->rc_inp->inp_in_hpts &&
4369 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
4371 * Ok we can't trust the pacer in this case
4372 * where we transition from un-paced to paced.
4373 * Or for that matter when the burst mitigation
4374 * was making a wild guess and got it wrong.
4375 * Stop the pacer and clear up all the aggregate
4378 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
4379 rack->r_ctl.rc_hpts_flags = 0;
4380 rack->r_ctl.rc_last_output_to = 0;
4383 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) {
4384 /* Still a small number, run an average */
4385 rack->r_ctl.gp_bw += bytes_ps;
4386 addpart = rack->r_ctl.num_measurements;
4387 rack->r_ctl.num_measurements++;
4388 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
4389 /* We have collected enough to move forward */
4390 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements;
4395 * We want to take 1/wma of the goodput and add it to 7/8th
4396 * of the old value weighted by the srtt. So if your measurement
4397 * period is say 2 SRTT's long you would get 1/4 as the
4398 * value, if it was like 1/2 SRTT then you would get 1/16th.
4400 * But we must be careful not to take too much i.e. if the
4401 * srtt is say 20ms and the measurement is taken over
4402 * 400ms our weight would be 400/20 i.e. 20. On the
4403 * other hand if we get a measurement over 1ms with a
4404 * 10ms rtt we only want to take a much smaller portion.
4406 if (rack->r_ctl.num_measurements < 0xff) {
4407 rack->r_ctl.num_measurements++;
4409 srtt = (uint64_t)tp->t_srtt;
4412 * Strange why did t_srtt go back to zero?
4414 if (rack->r_ctl.rc_rack_min_rtt)
4415 srtt = rack->r_ctl.rc_rack_min_rtt;
4417 srtt = HPTS_USEC_IN_MSEC;
4420 * XXXrrs: Note for reviewers, in playing with
4421 * dynamic pacing I discovered this GP calculation
4422 * as done originally leads to some undesired results.
4423 * Basically you can get longer measurements contributing
4424 * too much to the WMA. Thus I changed it so that if you are doing
4425 * dynamic adjustments we only do the apportioned adjustment
4426 * if we have a very small (time wise) measurement. Longer
4427 * measurements just get their weight (defaulting to 1/8)
4428 * added to the WMA. We may want to think about changing
4429 * this to always do that for both sides i.e. dynamic
4430 * and non-dynamic... but considering lots of folks
4431 * were playing with this I did not want to change the
4432 * calculation per se without your thoughts.. Lawerence?
4435 if (rack->rc_gp_dyn_mul == 0) {
4436 subpart = rack->r_ctl.gp_bw * utim;
4437 subpart /= (srtt * 8);
4438 if (subpart < (rack->r_ctl.gp_bw / 2)) {
4440 * The b/w update takes no more
4441 * away than 1/2 our running total
4444 addpart = bytes_ps * utim;
4445 addpart /= (srtt * 8);
4448 * Don't allow a single measurement
4449 * to account for more than 1/2 of the
4450 * WMA. This could happen on a retransmission
4451 * where utim becomes huge compared to
4452 * srtt (multiple retransmissions when using
4453 * the sending rate which factors in all the
4454 * transmissions from the first one).
4456 subpart = rack->r_ctl.gp_bw / 2;
4457 addpart = bytes_ps / 2;
4459 resid_bw = rack->r_ctl.gp_bw - subpart;
4460 rack->r_ctl.gp_bw = resid_bw + addpart;
4463 if ((utim / srtt) <= 1) {
4465 * The b/w update was over a small period
4466 * of time. The idea here is to prevent a small
4467 * measurement time period from counting
4468 * too much. So we scale it based on the
4469 * time so it attributes less than 1/rack_wma_divisor
4470 * of its measurement.
4472 subpart = rack->r_ctl.gp_bw * utim;
4473 subpart /= (srtt * rack_wma_divisor);
4474 addpart = bytes_ps * utim;
4475 addpart /= (srtt * rack_wma_divisor);
4478 * The scaled measurement was long
4479 * enough so let's just add in the
4480 * portion of the measurement, i.e. 1/rack_wma_divisor
4482 subpart = rack->r_ctl.gp_bw / rack_wma_divisor;
4483 addpart = bytes_ps / rack_wma_divisor;
4485 if ((rack->measure_saw_probe_rtt == 0) ||
4486 (bytes_ps > rack->r_ctl.gp_bw)) {
4488 * For probe-rtt we only add it in
4489 * if its larger, all others we just
4493 resid_bw = rack->r_ctl.gp_bw - subpart;
4494 rack->r_ctl.gp_bw = resid_bw + addpart;
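/*
 * Worked example of the dynamic WMA above (assumed values, with the
 * default 1/8 weight, i.e. rack_wma_divisor = 8): gp_bw = 1,000,000
 * bytes/sec and a full-length measurement of bytes_ps = 1,200,000
 * gives
 *   subpart = 1000000 / 8 = 125,000
 *   addpart = 1200000 / 8 = 150,000
 *   gp_bw   = (1000000 - 125000) + 150000 = 1,025,000 bytes/sec
 * while a measurement spanning only half an srtt would carry half
 * that weight.
 */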
4498 if ((rack->gp_ready == 0) &&
4499 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
4500 /* We have enough measurements now */
4502 rack_set_cc_pacing(rack);
4503 if (rack->defer_options)
4504 rack_apply_deferred_options(rack);
4506 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim,
4507 rack_get_bw(rack), 22, did_add, NULL);
4508 /* We do not update any multipliers if we are in or have seen a probe-rtt */
4509 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set)
4510 rack_update_multiplier(rack, timely_says, bytes_ps,
4511 rack->r_ctl.rc_gp_srtt,
4512 rack->r_ctl.rc_rtt_diff);
4513 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim,
4514 rack_get_bw(rack), 3, line, NULL);
4515 /* reset the gp srtt and setup the new prev */
4516 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
4517 /* Record the lost count for the next measurement */
4518 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count;
4520 * We restart our diffs based on the gpsrtt in the
4521 * measurement window.
4523 rack->rc_gp_rtt_set = 0;
4524 rack->rc_gp_saw_rec = 0;
4525 rack->rc_gp_saw_ca = 0;
4526 rack->rc_gp_saw_ss = 0;
4527 rack->rc_dragged_bottom = 0;
4531 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
4534 * XXXLAS: This is a temporary hack, and should be
4535 * chained off VOI_TCP_GPUT when stats(9) grows an
4536 * API to deal with chained VOIs.
4538 if (tp->t_stats_gput_prev > 0)
4539 stats_voi_update_abs_s32(tp->t_stats,
4541 ((gput - tp->t_stats_gput_prev) * 100) /
4542 tp->t_stats_gput_prev);
4544 tp->t_flags &= ~TF_GPUTINPROG;
4545 tp->t_stats_gput_prev = gput;
4547 * Now, are we app limited and is there space from where we
4548 * were to where we want to go?
4550 * We don't do the other case i.e. non-applimited here since
4551 * the next send will trigger us picking up the missing data.
4553 if (rack->r_ctl.rc_first_appl &&
4554 TCPS_HAVEESTABLISHED(tp->t_state) &&
4555 rack->r_ctl.rc_app_limited_cnt &&
4556 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) &&
4557 ((rack->r_ctl.rc_first_appl->r_start - th_ack) >
4558 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
4560 * Yep there is enough outstanding to make a measurement here.
4562 struct rack_sendmap *rsm, fe;
4564 tp->t_flags |= TF_GPUTINPROG;
4565 rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
4566 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
4567 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
4568 rack->app_limited_needs_set = 0;
4569 tp->gput_seq = th_ack;
4570 if (rack->in_probe_rtt)
4571 rack->measure_saw_probe_rtt = 1;
4572 else if ((rack->measure_saw_probe_rtt) &&
4573 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
4574 rack->measure_saw_probe_rtt = 0;
4575 if ((rack->r_ctl.rc_first_appl->r_start - th_ack) >= rack_get_measure_window(tp, rack)) {
4576 /* There is a full window to gain info from */
4577 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
4579 /* We can only measure up to the applimited point */
4580 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_start - th_ack);
4583 * Now we need to find the timestamp of the send at tp->gput_seq
4584 * for the send based measurement.
4586 fe.r_start = tp->gput_seq;
4587 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
4589 /* Ok send-based limit is set */
4590 if (SEQ_LT(rsm->r_start, tp->gput_seq)) {
4592 * Move back to include the earlier part
4593 * so our ack time lines up right (this may
4594 * make an overlapping measurement but that's
4597 tp->gput_seq = rsm->r_start;
4599 if (rsm->r_flags & RACK_ACKED)
4600 tp->gput_ts = (uint32_t)rsm->r_ack_arrival;
4602 rack->app_limited_needs_set = 1;
4603 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
4606 * If we don't find the rsm due to some
4607 * send-limit set the current time, which
4608 * basically disables the send-limit.
4613 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
4615 rack_log_pacing_delay_calc(rack,
4620 rack->r_ctl.rc_app_limited_cnt,
4627 * CC wrapper hook functions
4630 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs,
4631 uint16_t type, int32_t recovery)
4633 uint32_t prior_cwnd, acked;
4634 struct tcp_log_buffer *lgb = NULL;
4635 uint8_t labc_to_use;
4637 INP_WLOCK_ASSERT(tp->t_inpcb);
4638 tp->ccv->nsegs = nsegs;
4639 acked = tp->ccv->bytes_this_ack = (th_ack - tp->snd_una);
4640 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) {
4643 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp);
4644 if (tp->ccv->bytes_this_ack > max) {
4645 tp->ccv->bytes_this_ack = max;
4649 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
4650 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd);
4652 if ((tp->t_flags & TF_GPUTINPROG) &&
4653 rack_enough_for_measurement(tp, rack, th_ack)) {
4654 /* Measure the Goodput */
4655 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__);
4656 #ifdef NETFLIX_PEAKRATE
4657 if ((type == CC_ACK) &&
4658 (tp->t_maxpeakrate)) {
4660 * We update t_peakrate_thr. This gives us roughly
4661 * one update per round trip time. Note
4662 * it will only be used if pace_always is off, i.e.
4663 * we don't do this for paced flows.
4665 rack_update_peakrate_thr(tp);
4669 /* Which way are we limited; if not cwnd limited, no advance in CA */
4670 if (tp->snd_cwnd <= tp->snd_wnd)
4671 tp->ccv->flags |= CCF_CWND_LIMITED;
4673 tp->ccv->flags &= ~CCF_CWND_LIMITED;
4674 if (tp->snd_cwnd > tp->snd_ssthresh) {
4675 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
4676 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp));
4677 /* For the setting of a window past use the actual scwnd we are using */
4678 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) {
4679 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use;
4680 tp->ccv->flags |= CCF_ABC_SENTAWND;
4683 tp->ccv->flags &= ~CCF_ABC_SENTAWND;
4684 tp->t_bytes_acked = 0;
4686 prior_cwnd = tp->snd_cwnd;
4687 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec ||
4688 (rack_client_low_buf && (rack->client_bufferlvl < rack_client_low_buf)))
4689 labc_to_use = rack->rc_labc;
4691 labc_to_use = rack_max_abc_post_recovery;
4692 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
4693 union tcp_log_stackspecific log;
4696 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
4697 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
4698 log.u_bbr.flex1 = th_ack;
4699 log.u_bbr.flex2 = tp->ccv->flags;
4700 log.u_bbr.flex3 = tp->ccv->bytes_this_ack;
4701 log.u_bbr.flex4 = tp->ccv->nsegs;
4702 log.u_bbr.flex5 = labc_to_use;
4703 log.u_bbr.flex6 = prior_cwnd;
4704 log.u_bbr.flex7 = V_tcp_do_newsack;
4705 log.u_bbr.flex8 = 1;
4706 lgb = tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
4707 0, &log, false, NULL, NULL, 0, &tv);
4709 if (CC_ALGO(tp)->ack_received != NULL) {
4710 /* XXXLAS: Find a way to live without this */
4711 tp->ccv->curack = th_ack;
4712 tp->ccv->labc = labc_to_use;
4713 tp->ccv->flags |= CCF_USE_LOCAL_ABC;
4714 CC_ALGO(tp)->ack_received(tp->ccv, type);
4717 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd;
4719 if (rack->r_must_retran) {
4720 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) {
4722 * We now are beyond the rxt point so let's disable
4725 rack->r_ctl.rc_out_at_rto = 0;
4726 rack->r_must_retran = 0;
4727 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) {
4729 * Only decrement the rc_out_at_rto if the cwnd advances
4730 * at least a whole segment. Otherwise next time the peer
4731 * acks, we won't be able to send; this generally happens
4732 * when we are in Congestion Avoidance.
4734 if (acked <= rack->r_ctl.rc_out_at_rto){
4735 rack->r_ctl.rc_out_at_rto -= acked;
4737 rack->r_ctl.rc_out_at_rto = 0;
4742 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use);
4744 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) {
4745 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use;
4747 #ifdef NETFLIX_PEAKRATE
4748 /* we enforce max peak rate if it is set and we are not pacing */
4749 if ((rack->rc_always_pace == 0) &&
4750 tp->t_peakrate_thr &&
4751 (tp->snd_cwnd > tp->t_peakrate_thr)) {
4752 tp->snd_cwnd = tp->t_peakrate_thr;
4758 tcp_rack_partialack(struct tcpcb *tp)
4760 struct tcp_rack *rack;
4762 rack = (struct tcp_rack *)tp->t_fb_ptr;
4763 INP_WLOCK_ASSERT(tp->t_inpcb);
4765 * If we are doing PRR and have enough
4766 * room to send <or> we are pacing and prr
4767 * is disabled we will want to see if we
4768 * can send data (by setting r_wanted_output to
4771 if ((rack->r_ctl.rc_prr_sndcnt > 0) ||
4773 rack->r_wanted_output = 1;
4777 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack)
4779 struct tcp_rack *rack;
4782 orig_cwnd = tp->snd_cwnd;
4783 INP_WLOCK_ASSERT(tp->t_inpcb);
4784 rack = (struct tcp_rack *)tp->t_fb_ptr;
4785 /* only alert CC if we alerted when we entered */
4786 if (CC_ALGO(tp)->post_recovery != NULL) {
4787 tp->ccv->curack = th_ack;
4788 CC_ALGO(tp)->post_recovery(tp->ccv);
4789 if (tp->snd_cwnd < tp->snd_ssthresh) {
4791 * Rack has burst control and pacing
4792 * so let's not set this any lower than
4793 * snd_ssthresh per RFC-6582 (option 2).
4795 tp->snd_cwnd = tp->snd_ssthresh;
4798 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
4799 union tcp_log_stackspecific log;
4802 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
4803 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
4804 log.u_bbr.flex1 = th_ack;
4805 log.u_bbr.flex2 = tp->ccv->flags;
4806 log.u_bbr.flex3 = tp->ccv->bytes_this_ack;
4807 log.u_bbr.flex4 = tp->ccv->nsegs;
4808 log.u_bbr.flex5 = V_tcp_abc_l_var;
4809 log.u_bbr.flex6 = orig_cwnd;
4810 log.u_bbr.flex7 = V_tcp_do_newsack;
4811 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
4812 log.u_bbr.flex8 = 2;
4813 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
4814 0, &log, false, NULL, NULL, 0, &tv);
4816 if ((rack->rack_no_prr == 0) &&
4817 (rack->no_prr_addback == 0) &&
4818 (rack->r_ctl.rc_prr_sndcnt > 0)) {
4820 * Suck the next prr cnt back into cwnd, but
4821 * only do that if we are not application limited.
4823 if (ctf_outstanding(tp) <= sbavail(&(tp->t_inpcb->inp_socket->so_snd))) {
4825 * We are allowed to add back to the cwnd the amount we did
4827 * a) no_prr_addback is off.
4828 * b) we are not app limited
4829 * c) we are doing prr
4831 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none).
4833 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax),
4834 rack->r_ctl.rc_prr_sndcnt);
4836 rack->r_ctl.rc_prr_sndcnt = 0;
4837 rack_log_to_prr(rack, 1, 0);
4839 rack_log_to_prr(rack, 14, orig_cwnd);
4840 tp->snd_recover = tp->snd_una;
4841 EXIT_RECOVERY(tp->t_flags);
4845 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack)
4847 struct tcp_rack *rack;
4848 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd;
4850 INP_WLOCK_ASSERT(tp->t_inpcb);
4852 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
4854 if (IN_RECOVERY(tp->t_flags) == 0) {
4855 in_rec_at_entry = 0;
4856 ssthresh_enter = tp->snd_ssthresh;
4857 cwnd_enter = tp->snd_cwnd;
4859 in_rec_at_entry = 1;
4860 rack = (struct tcp_rack *)tp->t_fb_ptr;
4863 tp->t_flags &= ~TF_WASFRECOVERY;
4864 tp->t_flags &= ~TF_WASCRECOVERY;
4865 if (!IN_FASTRECOVERY(tp->t_flags)) {
4866 rack->r_ctl.rc_prr_delivered = 0;
4867 rack->r_ctl.rc_prr_out = 0;
4868 if (rack->rack_no_prr == 0) {
4869 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
4870 rack_log_to_prr(rack, 2, in_rec_at_entry);
4872 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una;
4873 tp->snd_recover = tp->snd_max;
4874 if (tp->t_flags2 & TF2_ECN_PERMIT)
4875 tp->t_flags2 |= TF2_ECN_SND_CWR;
4879 if (!IN_CONGRECOVERY(tp->t_flags) ||
4881 * Allow ECN reaction on ACK to CWR, if
4882 * that data segment was also CE marked.
4884 SEQ_GEQ(ack, tp->snd_recover)) {
4885 EXIT_CONGRECOVERY(tp->t_flags);
4886 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd);
4887 tp->snd_recover = tp->snd_max + 1;
4888 if (tp->t_flags2 & TF2_ECN_PERMIT)
4889 tp->t_flags2 |= TF2_ECN_SND_CWR;
4894 tp->t_bytes_acked = 0;
4895 EXIT_RECOVERY(tp->t_flags);
4896 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 /
4897 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp);
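/*
 * Example of the ssthresh calculation above (assumed values):
 * snd_wnd = 200,000 bytes, cwnd_to_use = 100,000 bytes and a
 * 1448 byte maxseg:
 *   ssthresh = max(2, (100000 / 2) / 1448) * 1448
 *            = 34 * 1448 = 49,232 bytes.
 */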
4898 orig_cwnd = tp->snd_cwnd;
4899 tp->snd_cwnd = ctf_fixed_maxseg(tp);
4900 rack_log_to_prr(rack, 16, orig_cwnd);
4901 if (tp->t_flags2 & TF2_ECN_PERMIT)
4902 tp->t_flags2 |= TF2_ECN_SND_CWR;
4905 KMOD_TCPSTAT_INC(tcps_sndrexmitbad);
4906 /* RTO was unnecessary, so reset everything. */
4907 tp->snd_cwnd = tp->snd_cwnd_prev;
4908 tp->snd_ssthresh = tp->snd_ssthresh_prev;
4909 tp->snd_recover = tp->snd_recover_prev;
4910 if (tp->t_flags & TF_WASFRECOVERY) {
4911 ENTER_FASTRECOVERY(tp->t_flags);
4912 tp->t_flags &= ~TF_WASFRECOVERY;
4914 if (tp->t_flags & TF_WASCRECOVERY) {
4915 ENTER_CONGRECOVERY(tp->t_flags);
4916 tp->t_flags &= ~TF_WASCRECOVERY;
4918 tp->snd_nxt = tp->snd_max;
4919 tp->t_badrxtwin = 0;
4922 if ((CC_ALGO(tp)->cong_signal != NULL) &&
4924 tp->ccv->curack = ack;
4925 CC_ALGO(tp)->cong_signal(tp->ccv, type);
4927 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) {
4928 rack_log_to_prr(rack, 15, cwnd_enter);
4929 rack->r_ctl.dsack_byte_cnt = 0;
4930 rack->r_ctl.retran_during_recovery = 0;
4931 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter;
4932 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter;
4933 rack->r_ent_rec_ns = 1;
4938 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp)
4942 INP_WLOCK_ASSERT(tp->t_inpcb);
4944 #ifdef NETFLIX_STATS
4945 KMOD_TCPSTAT_INC(tcps_idle_restarts);
4946 if (tp->t_state == TCPS_ESTABLISHED)
4947 KMOD_TCPSTAT_INC(tcps_idle_estrestarts);
4949 if (CC_ALGO(tp)->after_idle != NULL)
4950 CC_ALGO(tp)->after_idle(tp->ccv);
4952 if (tp->snd_cwnd == 1)
4953 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */
4955 i_cwnd = rc_init_window(rack);
4958 * Being idle is no different than the initial window. If the cc
4959 * clamps it down below the initial window, raise it to the initial
4962 if (tp->snd_cwnd < i_cwnd) {
4963 tp->snd_cwnd = i_cwnd;
4968 * Indicate whether this ack should be delayed. We can delay the ack if
4969 * the following conditions are met:
4970 * - There is no delayed ack timer in progress.
4971 * - Our last ack wasn't a 0-sized window. We never want to delay
4972 * the ack that opens up a 0-sized window.
4973 * - LRO wasn't used for this segment. We make sure by checking that the
4974 * segment size is not larger than the MSS.
4975 * - Delayed acks are enabled or this is a half-synchronized T/TCP
4978 #define DELAY_ACK(tp, tlen) \
4979 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \
4980 ((tp->t_flags & TF_DELACK) == 0) && \
4981 (tlen <= tp->t_maxseg) && \
4982 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
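/*
 * Illustrative use of DELAY_ACK() (values assumed): for an in-order
 * 1448-byte segment with t_maxseg = 1448, no delayed-ack timer
 * already pending (TF_DELACK clear), the last window we advertised
 * non-zero (TF_RXWIN0SENT clear) and delayed acks enabled, the macro
 * yields true and the input path may hold the ack. A 2896-byte LRO
 * aggregate (tlen > t_maxseg) makes it false, forcing an immediate
 * ack instead.
 */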
4984 static struct rack_sendmap *
4985 rack_find_lowest_rsm(struct tcp_rack *rack)
4987 struct rack_sendmap *rsm;
4990 * Walk the time-order transmitted list looking for an rsm that is
4991 * not acked. This will be the one that was sent the longest time
4992 * ago that is still outstanding.
4994 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
4995 if (rsm->r_flags & RACK_ACKED) {
5004 static struct rack_sendmap *
5005 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
5007 struct rack_sendmap *prsm;
5010 * Walk the sequence-ordered list backward until we arrive at
5011 * the highest seq not acked. In theory, when this is called it
5012 * should be the last segment (which it was not).
5014 counter_u64_add(rack_find_high, 1);
5016 RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) {
5017 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
5026 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
5032 * lro is the flag we use to determine if we have seen reordering.
5033 * If it gets set we have seen reordering. The reorder logic either
5034 * works in one of two ways:
5036 * If reorder-fade is configured, then we track the last time we saw
5037 * re-ordering occur. If we reach the point where enough time has
5038 * passed, we no longer consider reordering to be occurring.
5040 * Or if reorder-fade is 0, then once we see reordering we consider
5041 * the connection to always be subject to reordering and just set lro
5044 * In the end if lro is non-zero we add the extra time for
5049 if (rack->r_ctl.rc_reorder_ts) {
5050 if (rack->r_ctl.rc_reorder_fade) {
5051 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
5052 lro = cts - rack->r_ctl.rc_reorder_ts;
5055 * No time has passed since the last
5056 * reorder; mark it as reordering.
5061 /* Negative time? */
5064 if (lro > rack->r_ctl.rc_reorder_fade) {
5065 /* Turn off reordering seen too */
5066 rack->r_ctl.rc_reorder_ts = 0;
5070 /* Reordering does not fade */
5076 thresh = srtt + rack->r_ctl.rc_pkt_delay;
5078 /* It must be set, if not you get 1/4 rtt */
5079 if (rack->r_ctl.rc_reorder_shift)
5080 thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
5082 thresh += (srtt >> 2);
5086 /* We don't let the rack timeout be above a RTO */
5087 if (thresh > rack->rc_tp->t_rxtcur) {
5088 thresh = rack->rc_tp->t_rxtcur;
5090 /* And we don't want it above the RTO max either */
5091 if (thresh > rack_rto_max) {
5092 thresh = rack_rto_max;
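/*
 * Worked example of the threshold math above (usec values assumed
 * for illustration): with srtt = 40000, rc_pkt_delay = 1000 and
 * reordering seen with rc_reorder_shift = 2, thresh = 40000 + 1000 +
 * (40000 >> 2) = 51000 usecs. If t_rxtcur were 45000, the RTO cap
 * clamps the threshold back down to 45000, and rack_rto_max bounds
 * it further.
 */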
5098 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
5099 struct rack_sendmap *rsm, uint32_t srtt)
5101 struct rack_sendmap *prsm;
5102 uint32_t thresh, len;
5107 if (rack->r_ctl.rc_tlp_threshold)
5108 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold);
5110 thresh = (srtt * 2);
5112 /* Get the previous sent packet, if any */
5113 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
5114 counter_u64_add(rack_enter_tlp_calc, 1);
5115 len = rsm->r_end - rsm->r_start;
5116 if (rack->rack_tlp_threshold_use == TLP_USE_ID) {
5117 /* Exactly like the ID */
5118 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) {
5119 uint32_t alt_thresh;
5121 * Compensate for delayed-ack with the d-ack time.
5123 counter_u64_add(rack_used_tlpmethod, 1);
5124 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
5125 if (alt_thresh > thresh)
5126 thresh = alt_thresh;
5128 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) {
5130 prsm = TAILQ_PREV(rsm, rack_head, r_tnext);
5131 if (prsm && (len <= segsiz)) {
5133 * Two packets outstanding, thresh should be (2*srtt) +
5134 * possible inter-packet delay (if any).
5136 uint32_t inter_gap = 0;
5139 counter_u64_add(rack_used_tlpmethod, 1);
5140 idx = rsm->r_rtr_cnt - 1;
5141 nidx = prsm->r_rtr_cnt - 1;
5142 if (rsm->r_tim_lastsent[idx] >= prsm->r_tim_lastsent[nidx]) {
5143 /* Yes it was sent later (or at the same time) */
5144 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx];
5146 thresh += inter_gap;
5147 } else if (len <= segsiz) {
5149 * Possibly compensate for delayed-ack.
5151 uint32_t alt_thresh;
5153 counter_u64_add(rack_used_tlpmethod2, 1);
5154 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
5155 if (alt_thresh > thresh)
5156 thresh = alt_thresh;
5158 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) {
5160 if (len <= segsiz) {
5161 uint32_t alt_thresh;
5163 * Compensate for delayed-ack with the d-ack time.
5165 counter_u64_add(rack_used_tlpmethod, 1);
5166 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
5167 if (alt_thresh > thresh)
5168 thresh = alt_thresh;
5171 /* Not above an RTO */
5172 if (thresh > tp->t_rxtcur) {
5173 thresh = tp->t_rxtcur;
5175 /* Not above a RTO max */
5176 if (thresh > rack_rto_max) {
5177 thresh = rack_rto_max;
5179 /* Apply user supplied min TLP */
5180 if (thresh < rack_tlp_min) {
5181 thresh = rack_tlp_min;
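/*
 * Worked example of the TLP threshold (usec values assumed for
 * illustration): with srtt = 40000 and rc_tlp_threshold = 2, the
 * base is 40000 + 40000 / 2 = 60000. With only one segment
 * outstanding, the delayed-ack compensation alt_thresh = 40000 +
 * 20000 + rack_delayed_ack_time overrides the base if it is larger.
 * The result is then clamped to t_rxtcur and rack_rto_max and
 * raised to at least rack_tlp_min.
 */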
5187 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack)
5190 * We want the rack_rtt which is the
5191 * last rtt we measured. However if that
5192 * does not exist we fall back to the srtt (which
5193 * we probably will never do) and then as a last
5194 * resort we use RACK_INITIAL_RTO if no srtt is present.
5197 if (rack->rc_rack_rtt)
5198 return (rack->rc_rack_rtt);
5199 else if (tp->t_srtt == 0)
5200 return (RACK_INITIAL_RTO);
5201 return (tp->t_srtt);
5204 static struct rack_sendmap *
5205 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused)
5208 * Check to see that we don't need to fall into recovery. We will
5209 * need to do so if our oldest transmit is past the time we should have had an ack.
5212 struct tcp_rack *rack;
5213 struct rack_sendmap *rsm;
5215 uint32_t srtt, thresh;
5217 rack = (struct tcp_rack *)tp->t_fb_ptr;
5218 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) {
5221 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5225 if (rsm->r_flags & RACK_ACKED) {
5226 rsm = rack_find_lowest_rsm(rack);
5230 idx = rsm->r_rtr_cnt - 1;
5231 srtt = rack_grab_rtt(tp, rack);
5232 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
5233 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) {
5236 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) {
5239 /* Ok if we reach here we are overdue and this guy can be sent */
5240 if (IN_RECOVERY(tp->t_flags) == 0) {
5242 * For the one that enters us into recovery record undo
5245 rack->r_ctl.rc_rsm_start = rsm->r_start;
5246 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
5247 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
5249 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
5254 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack)
5260 t = (tp->t_srtt + (tp->t_rttvar << 2));
5261 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
5262 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop);
5263 if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
5265 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT;
5266 ret_val = (uint32_t)tt;
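/*
 * Worked example of the persist backoff above (values assumed for
 * illustration): with t_srtt = 40000 usecs, t_rttvar = 5000 and
 * t_rxtshift = 2, t = 40000 + (5000 << 2) = 60000 and the raw
 * timeout is 60000 * tcp_backoff[2] = 240000 usecs;
 * RACK_TCPT_RANGESET then adds the timer slop and clamps the result
 * between rack_persist_min and rack_persist_max.
 */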
5271 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack)
5274 * Start the FR timer, we do this based on getting the first one in
5275 * the rc_tmap. Note that if it's NULL we must stop the timer. In all
5276 * events we need to stop the running timer (if it's running) before
5277 * starting the new one.
5279 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse;
5282 int32_t is_tlp_timer = 0;
5283 struct rack_sendmap *rsm;
5285 if (rack->t_timers_stopped) {
5286 /* All timers have been stopped none are to run */
5289 if (rack->rc_in_persist) {
5290 /* We can't start any timer in persists */
5291 return (rack_get_persists_timer_val(tp, rack));
5293 rack->rc_on_min_to = 0;
5294 if ((tp->t_state < TCPS_ESTABLISHED) ||
5295 ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
5298 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5299 if ((rsm == NULL) || sup_rack) {
5300 /* Nothing on the send map or no rack */
5302 time_since_sent = 0;
5303 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5306 * Should we discount the RTX timer any?
5308 * We want to discount it the smallest amount.
5309 * If a timer (Rack/TLP or RXT) has gone off more
5310 * recently, that's the discount we want to use (now - timer time).
5311 * If the retransmit of the oldest packet was more recent, then
5312 * we want to use that (now - oldest-packet-last_transmit_time).
5315 idx = rsm->r_rtr_cnt - 1;
5316 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx])))
5317 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
5319 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
5320 if (TSTMP_GT(cts, tstmp_touse))
5321 time_since_sent = cts - tstmp_touse;
5323 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) {
5324 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT;
5326 if (to > time_since_sent)
5327 to -= time_since_sent;
5329 to = rack->r_ctl.rc_min_to;
5332 /* Special case for KEEPINIT */
5333 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
5334 (TP_KEEPINIT(tp) != 0) &&
5337 * We have to put a ceiling on the rxt timer
5338 * of the keep-init timeout.
5340 uint32_t max_time, red;
5342 max_time = TICKS_2_USEC(TP_KEEPINIT(tp));
5343 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) {
5344 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]);
5350 /* Reduce timeout to the keep value if needed */
5358 if (rsm->r_flags & RACK_ACKED) {
5359 rsm = rack_find_lowest_rsm(rack);
5365 if (rack->sack_attack_disable) {
5367 * We don't want to do
5368 * any TLP's if you are an attacker.
5369 * Though if you are doing what
5370 * is expected you may still have
5371 * SACK-PASSED marks.
5375 /* Convert from ms to usecs */
5376 if ((rsm->r_flags & RACK_SACK_PASSED) || (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
5377 if ((tp->t_flags & TF_SENTFIN) &&
5378 ((tp->snd_max - tp->snd_una) == 1) &&
5379 (rsm->r_flags & RACK_HAS_FIN)) {
5381 * We don't start a rack timer if all we have is a FIN outstanding.
5386 if ((rack->use_rack_rr == 0) &&
5387 (IN_FASTRECOVERY(tp->t_flags)) &&
5388 (rack->rack_no_prr == 0) &&
5389 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) {
5391 * We are not cheating, in recovery and
5392 * not enough acks yet to get our next
5393 * retransmission out.
5395 * Note that classified attackers do not
5396 * get to use the rack-cheat.
5400 srtt = rack_grab_rtt(tp, rack);
5401 thresh = rack_calc_thresh_rack(rack, srtt, cts);
5402 idx = rsm->r_rtr_cnt - 1;
5403 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh;
5404 if (SEQ_GEQ(exp, cts)) {
5406 if (to < rack->r_ctl.rc_min_to) {
5407 to = rack->r_ctl.rc_min_to;
5408 if (rack->r_rr_config == 3)
5409 rack->rc_on_min_to = 1;
5412 to = rack->r_ctl.rc_min_to;
5413 if (rack->r_rr_config == 3)
5414 rack->rc_on_min_to = 1;
5417 /* Ok we need to do a TLP not RACK */
5419 if ((rack->rc_tlp_in_progress != 0) &&
5420 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) {
5422 * The previous send was a TLP and we have sent
5423 * N TLP's without sending new data.
5427 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
5429 /* We found no rsm to TLP with. */
5432 if (rsm->r_flags & RACK_HAS_FIN) {
5433 /* If it's a FIN we don't do TLP */
5437 idx = rsm->r_rtr_cnt - 1;
5438 time_since_sent = 0;
5439 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time))
5440 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
5442 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
5443 if (TSTMP_GT(cts, tstmp_touse))
5444 time_since_sent = cts - tstmp_touse;
5447 if ((rack->rc_srtt_measure_made == 0) &&
5448 (tp->t_srtt == 1)) {
5450 * If another stack has run and set srtt to 1,
5451 * then the srtt was 0, so let's use the initial.
5453 srtt = RACK_INITIAL_RTO;
5455 srtt_cur = tp->t_srtt;
5459 srtt = RACK_INITIAL_RTO;
5461 * If the SRTT is not keeping up and the
5462 * rack RTT has spiked we want to use
5463 * the last RTT not the smoothed one.
5465 if (rack_tlp_use_greater &&
5467 (srtt < rack_grab_rtt(tp, rack))) {
5468 srtt = rack_grab_rtt(tp, rack);
5470 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt);
5471 if (thresh > time_since_sent) {
5472 to = thresh - time_since_sent;
5474 to = rack->r_ctl.rc_min_to;
5475 rack_log_alt_to_to_cancel(rack,
5477 time_since_sent, /* flex2 */
5478 tstmp_touse, /* flex3 */
5479 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */
5480 (uint32_t)rsm->r_tim_lastsent[idx],
5484 if (to < rack_tlp_min) {
5487 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) {
5489 * If the TLP time works out to larger than the max
5490 * RTO, let's not do TLP... just RTO.
5495 if (is_tlp_timer == 0) {
5496 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK;
5498 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP;
5506 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5508 if (rack->rc_in_persist == 0) {
5509 if (tp->t_flags & TF_GPUTINPROG) {
5511 * Stop the goodput now, the calling of the
5512 * measurement function clears the flag.
5514 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__);
5516 #ifdef NETFLIX_SHARED_CWND
5517 if (rack->r_ctl.rc_scw) {
5518 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
5519 rack->rack_scwnd_is_idle = 1;
5522 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
5523 if (rack->r_ctl.rc_went_idle_time == 0)
5524 rack->r_ctl.rc_went_idle_time = 1;
5525 rack_timer_cancel(tp, rack, cts, __LINE__);
5527 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
5528 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
5529 rack->rc_in_persist = 1;
5534 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5536 if (rack->rc_inp->inp_in_hpts) {
5537 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
5538 rack->r_ctl.rc_hpts_flags = 0;
5540 #ifdef NETFLIX_SHARED_CWND
5541 if (rack->r_ctl.rc_scw) {
5542 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
5543 rack->rack_scwnd_is_idle = 0;
5546 if (rack->rc_gp_dyn_mul &&
5547 (rack->use_fixed_rate == 0) &&
5548 (rack->rc_always_pace)) {
5550 * Do we count this as if a probe-rtt just completed?
5553 uint32_t time_idle, idle_min;
5555 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time;
5556 idle_min = rack_min_probertt_hold;
5557 if (rack_probertt_gpsrtt_cnt_div) {
5559 extra = (uint64_t)rack->r_ctl.rc_gp_srtt *
5560 (uint64_t)rack_probertt_gpsrtt_cnt_mul;
5561 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div;
5562 idle_min += (uint32_t)extra;
5564 if (time_idle >= idle_min) {
5565 /* Yes, we count it as a probe-rtt. */
5568 us_cts = tcp_get_usecs(NULL);
5569 if (rack->in_probe_rtt == 0) {
5570 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
5571 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
5572 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
5573 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
5575 rack_exit_probertt(rack, us_cts);
5579 rack->rc_in_persist = 0;
5580 rack->r_ctl.rc_went_idle_time = 0;
5582 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
5583 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
5584 rack->r_ctl.rc_agg_delayed = 0;
5587 rack->r_ctl.rc_agg_early = 0;
5591 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts,
5592 struct hpts_diag *diag, struct timeval *tv)
5594 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
5595 union tcp_log_stackspecific log;
5597 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5598 log.u_bbr.flex1 = diag->p_nxt_slot;
5599 log.u_bbr.flex2 = diag->p_cur_slot;
5600 log.u_bbr.flex3 = diag->slot_req;
5601 log.u_bbr.flex4 = diag->inp_hptsslot;
5602 log.u_bbr.flex5 = diag->slot_remaining;
5603 log.u_bbr.flex6 = diag->need_new_to;
5604 log.u_bbr.flex7 = diag->p_hpts_active;
5605 log.u_bbr.flex8 = diag->p_on_min_sleep;
5606 /* Hijack other fields as needed */
5607 log.u_bbr.epoch = diag->have_slept;
5608 log.u_bbr.lt_epoch = diag->yet_to_sleep;
5609 log.u_bbr.pkts_out = diag->co_ret;
5610 log.u_bbr.applimited = diag->hpts_sleep_time;
5611 log.u_bbr.delivered = diag->p_prev_slot;
5612 log.u_bbr.inflight = diag->p_runningtick;
5613 log.u_bbr.bw_inuse = diag->wheel_tick;
5614 log.u_bbr.rttProp = diag->wheel_cts;
5615 log.u_bbr.timeStamp = cts;
5616 log.u_bbr.delRate = diag->maxticks;
5617 log.u_bbr.cur_del_rate = diag->p_curtick;
5618 log.u_bbr.cur_del_rate <<= 32;
5619 log.u_bbr.cur_del_rate |= diag->p_lasttick;
5620 TCP_LOG_EVENTP(rack->rc_tp, NULL,
5621 &rack->rc_inp->inp_socket->so_rcv,
5622 &rack->rc_inp->inp_socket->so_snd,
5623 BBR_LOG_HPTSDIAG, 0,
5624 0, &log, false, tv);
5630 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type)
5632 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
5633 union tcp_log_stackspecific log;
5636 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5637 log.u_bbr.flex1 = sb->sb_flags;
5638 log.u_bbr.flex2 = len;
5639 log.u_bbr.flex3 = sb->sb_state;
5640 log.u_bbr.flex8 = type;
5641 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
5642 TCP_LOG_EVENTP(rack->rc_tp, NULL,
5643 &rack->rc_inp->inp_socket->so_rcv,
5644 &rack->rc_inp->inp_socket->so_snd,
5646 len, &log, false, &tv);
5651 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
5652 int32_t slot, uint32_t tot_len_this_send, int sup_rack)
5654 struct hpts_diag diag;
5657 uint32_t delayed_ack = 0;
5658 uint32_t hpts_timeout;
5659 uint32_t entry_slot = slot;
5665 if ((tp->t_state == TCPS_CLOSED) ||
5666 (tp->t_state == TCPS_LISTEN)) {
5669 if (inp->inp_in_hpts) {
5670 /* Already on the pacer */
5673 stopped = rack->rc_tmr_stopped;
5674 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
5675 left = rack->r_ctl.rc_timer_exp - cts;
5677 rack->r_ctl.rc_timer_exp = 0;
5678 rack->r_ctl.rc_hpts_flags = 0;
5679 us_cts = tcp_get_usecs(&tv);
5680 /* Now early/late accounting */
5681 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL);
5682 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) {
5684 * We have an early carry-over set,
5685 * we can always add more time so we
5686 * can always make this compensation.
5688 * Note if acks are allowed to wake us, do not
5689 * penalize the next timer for being woken
5690 * by an ack aka the rc_agg_early (non-paced mode).
5692 slot += rack->r_ctl.rc_agg_early;
5694 rack->r_ctl.rc_agg_early = 0;
5698 * This is harder, we can
5699 * compensate some but it
5700 * really depends on what
5701 * the current pacing time is.
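/*
 * Example of the compensation below (values assumed for
 * illustration): if a 1000 usec pacing slot is requested and
 * rc_agg_delayed = 300, we shrink the slot to 700 and clear the
 * debt. If instead rc_agg_delayed = 1500, more than the slot, we
 * cannot absorb it all: the slot is pinned at the minimum floor
 * (the 100 usec note below) and the debt is reduced only by what
 * we actually absorbed, leaving the remainder for later timers.
 */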
5703 if (rack->r_ctl.rc_agg_delayed >= slot) {
5705 * We can't compensate for it all.
5706 * And we have to have some time
5707 * on the clock. We always have a min
5708 * 10 slots (10 x 10 i.e. 100 usecs).
5710 if (slot <= HPTS_TICKS_PER_USEC) {
5712 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_USEC - slot);
5713 slot = HPTS_TICKS_PER_USEC;
5715 /* We take off some */
5716 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_USEC);
5717 slot = HPTS_TICKS_PER_USEC;
5720 slot -= rack->r_ctl.rc_agg_delayed;
5721 rack->r_ctl.rc_agg_delayed = 0;
5722 /* Make sure we have 100 useconds at minimum */
5723 if (slot < HPTS_TICKS_PER_USEC) {
5724 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_USEC - slot;
5725 slot = HPTS_TICKS_PER_USEC;
5727 if (rack->r_ctl.rc_agg_delayed == 0)
5732 /* We are pacing too */
5733 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
5735 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack);
5736 #ifdef NETFLIX_EXP_DETECTION
5737 if (rack->sack_attack_disable &&
5738 (slot < tcp_sad_pacing_interval)) {
5740 * We have a potential attacker on
5741 * the line. We have possibly some
5742 * (or no) pacing time set. We want to
5743 * slow down the processing of sacks by some
5744 * amount (if it is an attacker). Set the default
5745 * slot for attackers in place (unless the original
5746 * interval is longer). It is stored in
5747 * micro-seconds.
5749 slot = tcp_sad_pacing_interval;
5752 if (tp->t_flags & TF_DELACK) {
5753 delayed_ack = TICKS_2_USEC(tcp_delacktime);
5754 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK;
5756 if (delayed_ack && ((hpts_timeout == 0) ||
5757 (delayed_ack < hpts_timeout)))
5758 hpts_timeout = delayed_ack;
5760 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
5762 * If no timers are going to run and we will fall off the hptsi
5763 * wheel, we resort to a keep-alive timer if it's configured.
5765 if ((hpts_timeout == 0) &&
5767 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
5768 (tp->t_state <= TCPS_CLOSING)) {
5770 * Ok we have no timer (persists, rack, tlp, rxt or
5771 * del-ack), we don't have segments being paced. So
5772 * all that is left is the keepalive timer.
5774 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
5775 /* Get the established keep-alive time */
5776 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp));
5779 * Get the initial setup keep-alive time,
5780 * note that this is probably not going to
5781 * happen, since rack will be running an rxt timer
5782 * if a SYN of some sort is outstanding. It is
5783 * actually handled in rack_timeout_rxt().
5785 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp));
5787 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP;
5788 if (rack->in_probe_rtt) {
5790 * We want to instead not wake up a long time from
5791 * now but to wake up about the time we would
5792 * exit probe-rtt and initiate a keep-alive ack.
5793 * This will get us out of probe-rtt and update
5796 hpts_timeout = rack_min_probertt_hold;
5800 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) ==
5801 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) {
5803 * RACK, TLP, persists and RXT timers all are restartable
5804 * based on actions input, i.e., we received a packet (ack
5805 * or sack) and that changes things (rwnd, or snd_una, etc.).
5806 * Thus we can restart them with a new value. For
5807 * keep-alive, delayed_ack we keep track of what was left
5808 * and restart the timer with a smaller value.
5810 if (left < hpts_timeout)
5811 hpts_timeout = left;
5815 * Hack alert: for now we can't time-out over 2,147,483,646
5816 * usecs (a bit under 36 minutes), which is probably ok
5819 if (hpts_timeout > 0x7ffffffe)
5820 hpts_timeout = 0x7ffffffe;
5821 rack->r_ctl.rc_timer_exp = cts + hpts_timeout;
5823 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL);
5824 if ((rack->gp_ready == 0) &&
5825 (rack->use_fixed_rate == 0) &&
5826 (hpts_timeout < slot) &&
5827 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) {
5829 * We have no good estimate yet for the
5830 * old clunky burst mitigation or the
5831 * real pacing. And the tlp or rxt is smaller
5832 * than the pacing calculation. Let's not
5833 * pace that long since we know the calculation
5834 * so far is not accurate.
5836 slot = hpts_timeout;
5838 rack->r_ctl.last_pacing_time = slot;
5840 * Turn off all the flags for queuing by default. The
5841 * flags have important meanings to what happens when
5842 * LRO interacts with the transport. Most likely (by default now)
5843 * mbuf_queueing and ack compression are on. So the transport
5844 * has a couple of flags that control what happens (if those
5845 * are not on then these flags won't have any effect since it
5846 * won't go through the queuing LRO path).
5848 * INP_MBUF_QUEUE_READY - This flags says that I am busy
5849 * pacing output, so don't disturb. But
5850 * it also means LRO can wake me if there
5851 * is a SACK arrival.
5853 * INP_DONT_SACK_QUEUE - This flag is used in conjunction
5854 * with the above flag (QUEUE_READY) and
5855 * when present it says don't even wake me
5856 * if a SACK arrives.
5858 * The idea behind these flags is that if we are pacing we
5859 * set the MBUF_QUEUE_READY and only get woken up if
5860 * a SACK arrives (which could change things) or if
5861 * our pacing timer expires. If, however, we have a rack
5862 * timer running, then we don't even want a sack to wake
5863 * us since the rack timer has to expire before we can send.
5865 * Other cases should usually have none of the flags set
5866 * so LRO can call into us.
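/*
 * A compact view of the resulting wake-up policy (summarizing the
 * code below; the flag names are from this file):
 *
 *   pacing slot only           -> INP_MBUF_QUEUE_READY
 *                                 (only a SACK or timer expiry wakes us)
 *   rack timer armed, rrr != 3 -> INP_MBUF_QUEUE_READY |
 *                                 INP_DONT_SACK_QUEUE
 *                                 (even a SACK must wait for the timer)
 *   rc_ack_can_sendout_data    -> both flags cleared (acks may send)
 *   no pacing slot             -> both flags cleared (any input wakes us)
 */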
5868 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
5870 rack->r_ctl.rc_last_output_to = us_cts + slot;
5872 * A pacing timer (slot) is being set, in
5873 * such a case we cannot send (we are blocked by
5874 * the timer). So lets tell LRO that it should not
5875 * wake us unless there is a SACK. Note this only
5876 * will be effective if mbuf queueing is on or
5877 * compressed acks are being processed.
5879 inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
5881 * But wait if we have a Rack timer running
5882 * even a SACK should not disturb us (with
5883 * the exception of r_rr_config 3).
5885 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) &&
5886 (rack->r_rr_config != 3))
5887 inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
5888 if (rack->rc_ack_can_sendout_data) {
5890 * Ahh but wait, this is that special case
5891 * where the pacing timer can be disturbed;
5892 * back out the changes (used for non-paced
5895 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
5897 if ((rack->use_rack_rr) &&
5898 (rack->r_rr_config < 2) &&
5899 ((hpts_timeout) && (hpts_timeout < slot))) {
5901 * Arrange for the hpts to kick back in after the
5902 * t-o if the t-o does not cause a send.
5904 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout),
5906 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5907 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
5909 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot),
5911 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5912 rack_log_to_start(rack, cts, hpts_timeout, slot, 1);
5914 } else if (hpts_timeout) {
5916 * With respect to inp_flags2 here, lets let any new acks wake
5917 * us up here. Since we are not pacing (no pacing timer), output
5918 * can happen so we should let it. If its a Rack timer, then any inbound
5919 * packet probably won't change the sending (we will be blocked)
5920 * but it may change the prr stats so letting it in (the set defaults
5921 * at the start of this block) are good enough.
5923 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout),
5925 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5926 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
5928 /* No timer starting */
5930 if (SEQ_GT(tp->snd_max, tp->snd_una)) {
5931 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?",
5932 tp, rack, tot_len_this_send, cts, slot, hpts_timeout);
5936 rack->rc_tmr_stopped = 0;
5938 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv);
5942 * RACK Timer, here we simply do logging and housekeeping;
5943 * the normal rack_output() function will call the
5944 * appropriate thing to check if we need to do a RACK retransmit.
5945 * We return 1, saying don't proceed with rack_output only
5946 * when all timers have been stopped (destroyed PCB?).
5949 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5952 * This timer simply provides an internal trigger to send out data.
5953 * The check_recovery_mode call will see if there are needed
5954 * retransmissions, if so we will enter fast-recovery. The output
5955 * call may or may not do the same thing depending on sysctl settings.
5958 struct rack_sendmap *rsm;
5960 if (tp->t_timers->tt_flags & TT_STOPPED) {
5963 counter_u64_add(rack_to_tot, 1);
5964 if (rack->r_state && (rack->r_state != tp->t_state))
5965 rack_set_state(tp, rack);
5966 rack->rc_on_min_to = 0;
5967 rsm = rack_check_recovery_mode(tp, cts);
5968 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm);
5970 rack->r_ctl.rc_resend = rsm;
5971 rack->r_timer_override = 1;
5972 if (rack->use_rack_rr) {
5974 * Don't accumulate extra pacing delay
5975 * we are allowing the rack timer to
5976 * override pacing, i.e., rrr takes precedence
5977 * if the pacing interval is longer than the rrr
5978 * time (in other words we get the min of the
5979 * pacing time and the rrr pacing time).
5981 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
5984 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK;
5986 /* restart a timer and return 1 */
5987 rack_start_hpts_timer(rack, tp, cts,
5995 rack_adjust_orig_mlen(struct rack_sendmap *rsm)
5997 if (rsm->m->m_len > rsm->orig_m_len) {
5999 * Mbuf grew, caused by sbcompress; our offset does not change.
6002 rsm->orig_m_len = rsm->m->m_len;
6003 } else if (rsm->m->m_len < rsm->orig_m_len) {
6005 * Mbuf shrank, trimmed off the top by an ack; our offset changes.
6008 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len);
6009 rsm->orig_m_len = rsm->m->m_len;
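/*
 * Example (values assumed for illustration): if the rsm was created
 * when its mbuf held orig_m_len = 1000 bytes and an ack has since
 * trimmed the mbuf to m_len = 600, the data the rsm points at slid
 * down by 400 bytes, so soff is reduced by 400 and orig_m_len is
 * resynced to 600. If sbcompress instead grew the mbuf, the start
 * of our data did not move, so only orig_m_len is updated.
 */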
6014 rack_setup_offset_for_rsm(struct rack_sendmap *src_rsm, struct rack_sendmap *rsm)
6019 if (src_rsm->m && (src_rsm->orig_m_len != src_rsm->m->m_len)) {
6020 /* Fix up the orig_m_len and possibly the mbuf offset */
6021 rack_adjust_orig_mlen(src_rsm);
6024 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start);
6025 while (soff >= m->m_len) {
6026 /* Move out past this mbuf */
6029 KASSERT((m != NULL),
6030 ("rsm:%p nrsm:%p hit at soff:%u null m",
6031 src_rsm, rsm, soff));
6035 rsm->orig_m_len = m->m_len;
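/*
 * Example of the walk above (values assumed for illustration): if
 * the source rsm starts at soff = 1500 in a 2048-byte mbuf and
 * covers 1000 bytes, the new rsm's data begins at offset 2500,
 * which is past that mbuf; we step to the next mbuf and carry
 * soff = 2500 - 2048 = 452 into it, then record that mbuf's m_len
 * as the new orig_m_len.
 */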
6038 static __inline void
6039 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm,
6040 struct rack_sendmap *rsm, uint32_t start)
6044 nrsm->r_start = start;
6045 nrsm->r_end = rsm->r_end;
6046 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
6047 nrsm->r_flags = rsm->r_flags;
6048 nrsm->r_dupack = rsm->r_dupack;
6049 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed;
6050 nrsm->r_rtr_bytes = 0;
6051 rsm->r_end = nrsm->r_start;
6052 nrsm->r_just_ret = rsm->r_just_ret;
6053 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
6054 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
6056 /* Now if we have SYN flag we keep it on the left edge */
6057 if (nrsm->r_flags & RACK_HAS_SYN)
6058 nrsm->r_flags &= ~RACK_HAS_SYN;
6059 /* Now if we have a FIN flag we keep it on the right edge */
6060 if (rsm->r_flags & RACK_HAS_FIN)
6061 rsm->r_flags &= ~RACK_HAS_FIN;
6062 /* Push bit must go to the right edge as well */
6063 if (rsm->r_flags & RACK_HAD_PUSH)
6064 rsm->r_flags &= ~RACK_HAD_PUSH;
6065 /* Clone over the state of the hw_tls flag */
6066 nrsm->r_hw_tls = rsm->r_hw_tls;
6068 * Now we need to find nrsm's new location in the mbuf chain;
6069 * we basically calculate a new offset, which is soff +
6070 * how much is left in the original rsm. Then we walk out the mbuf
6071 * chain to find the right position; it may be the same mbuf
6074 KASSERT(((rsm->m != NULL) ||
6075 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))),
6076 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack));
6078 rack_setup_offset_for_rsm(rsm, nrsm);
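/*
 * Example of a split (sequence numbers assumed for illustration):
 * cloning an rsm covering [1000, 3000) at start = 2000 leaves the
 * original as [1000, 2000) and the new nrsm as [2000, 3000). A SYN
 * flag stays with the left piece, while a FIN or PUSH flag stays
 * with the right piece, matching the edge each flag belongs to.
 */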
6081 static struct rack_sendmap *
6082 rack_merge_rsm(struct tcp_rack *rack,
6083 struct rack_sendmap *l_rsm,
6084 struct rack_sendmap *r_rsm)
6087 * We are merging two ack'd RSM's,
6088 * the l_rsm is on the left (lower seq
6089 * values) and the r_rsm is on the right
6090 * (higher seq value). The simplest way
6091 * to merge these is to move the right
6092 * one into the left. I don't think there
6093 * is any reason we need to try to find
6094 * the oldest (or last oldest retransmitted).
6096 struct rack_sendmap *rm;
6098 rack_log_map_chg(rack->rc_tp, rack, NULL,
6099 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__);
6100 l_rsm->r_end = r_rsm->r_end;
6101 if (l_rsm->r_dupack < r_rsm->r_dupack)
6102 l_rsm->r_dupack = r_rsm->r_dupack;
6103 if (r_rsm->r_rtr_bytes)
6104 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes;
6105 if (r_rsm->r_in_tmap) {
6106 /* This really should not happen */
6107 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext);
6108 r_rsm->r_in_tmap = 0;
6112 if (r_rsm->r_flags & RACK_HAS_FIN)
6113 l_rsm->r_flags |= RACK_HAS_FIN;
6114 if (r_rsm->r_flags & RACK_TLP)
6115 l_rsm->r_flags |= RACK_TLP;
6116 if (r_rsm->r_flags & RACK_RWND_COLLAPSED)
6117 l_rsm->r_flags |= RACK_RWND_COLLAPSED;
6118 if ((r_rsm->r_flags & RACK_APP_LIMITED) &&
6119 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) {
6121 * If both are app-limited then let the
6122 * free lower the count. If right is app
6123 * limited and left is not, transfer.
6125 l_rsm->r_flags |= RACK_APP_LIMITED;
6126 r_rsm->r_flags &= ~RACK_APP_LIMITED;
6127 if (r_rsm == rack->r_ctl.rc_first_appl)
6128 rack->r_ctl.rc_first_appl = l_rsm;
6130 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm);
6133 panic("removing head in rack:%p rsm:%p rm:%p",
6137 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) {
6138 /* Transfer the split limit to the map we free */
6139 r_rsm->r_limit_type = l_rsm->r_limit_type;
6140 l_rsm->r_limit_type = 0;
6142 rack_free(rack, r_rsm);
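/*
 * Example (sequence numbers assumed for illustration): merging
 * acked maps [1000, 2000) and [2000, 3000) extends the left map to
 * [1000, 3000), folds the right map's retransmit byte count in,
 * takes the larger dup-ack count, and frees the right map.
 */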
6147 * TLP Timer, here we simply set up what segment we want to
6148 * have the TLP expire on, the normal rack_output() will then
6151 * We return 1, saying don't proceed with rack_output only
6152 * when all timers have been stopped (destroyed PCB?).
6155 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6160 struct rack_sendmap *rsm = NULL;
6161 struct rack_sendmap *insret;
6164 uint32_t out, avail;
6165 int collapsed_win = 0;
6167 if (tp->t_timers->tt_flags & TT_STOPPED) {
6170 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
6171 /* It's not time yet */
6174 if (ctf_progress_timeout_check(tp, true)) {
6175 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
6176 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
6180 * A TLP timer has expired. We have been idle for 2 rtts. So we now
6181 * need to figure out how to force a full MSS segment out.
6183 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL);
6184 rack->r_ctl.retran_during_recovery = 0;
6185 rack->r_ctl.dsack_byte_cnt = 0;
6186 counter_u64_add(rack_tlp_tot, 1);
6187 if (rack->r_state && (rack->r_state != tp->t_state))
6188 rack_set_state(tp, rack);
6189 so = tp->t_inpcb->inp_socket;
6190 avail = sbavail(&so->so_snd);
6191 out = tp->snd_max - tp->snd_una;
6192 if (out > tp->snd_wnd) {
6193 /* special case, we need a retransmission */
6198 * Check our send oldest always settings, and if
6199 * there is an oldest to send jump to the need_retran.
6201 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0))
6205 /* New data is available */
6207 if (amm > ctf_fixed_maxseg(tp)) {
6208 amm = ctf_fixed_maxseg(tp);
6209 if ((amm + out) > tp->snd_wnd) {
6210 /* We are rwnd limited */
6213 } else if (amm < ctf_fixed_maxseg(tp)) {
6214 /* not enough to fill an MTU */
6217 if (IN_FASTRECOVERY(tp->t_flags)) {
6219 if (rack->rack_no_prr == 0) {
6220 if (out + amm <= tp->snd_wnd) {
6221 rack->r_ctl.rc_prr_sndcnt = amm;
6222 rack_log_to_prr(rack, 4, 0);
6227 /* Set the send-new override */
6228 if (out + amm <= tp->snd_wnd)
6229 rack->r_ctl.rc_tlp_new_data = amm;
6233 rack->r_ctl.rc_tlpsend = NULL;
6234 counter_u64_add(rack_tlp_newdata, 1);
6239 * Ok we need to arrange the last un-acked segment to be re-sent, or
6240 * optionally the first un-acked segment.
6242 if (collapsed_win == 0) {
6243 if (rack_always_send_oldest)
6244 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6246 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6247 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) {
6248 rsm = rack_find_high_nonack(rack, rsm);
6252 counter_u64_add(rack_tlp_does_nada, 1);
6254 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
6260 * We must find the last segment
6261 * that was acceptable to the client.
6263 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
6264 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) {
6270 /* None? if so send the first */
6271 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6273 counter_u64_add(rack_tlp_does_nada, 1);
6275 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
6281 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) {
6283 * We need to split this, the last segment, in two.
6285 struct rack_sendmap *nrsm;
6287 nrsm = rack_alloc_full_limit(rack);
6290 * No memory to split, we will just exit and punt
6291 * off to the RXT timer.
6293 counter_u64_add(rack_tlp_does_nada, 1);
6296 rack_clone_rsm(rack, nrsm, rsm,
6297 (rsm->r_end - ctf_fixed_maxseg(tp)));
6298 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
6299 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
6301 if (insret != NULL) {
6302 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
6303 nrsm, insret, rack, rsm);
6306 if (rsm->r_in_tmap) {
6307 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
6308 nrsm->r_in_tmap = 1;
6310 rsm->r_flags &= (~RACK_HAS_FIN);
6313 rack->r_ctl.rc_tlpsend = rsm;
6315 rack->r_timer_override = 1;
6316 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
6319 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
6324 * Delayed ack Timer, here we simply need to set the
6325 * ACK_NOW flag and remove the DELACK flag. From there
6326 * the output routine will send the ack out.
6328 * We only return 1, saying don't proceed, if all timers
6329 * are stopped (destroyed PCB?).
6332 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6334 if (tp->t_timers->tt_flags & TT_STOPPED) {
6337 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL);
6338 tp->t_flags &= ~TF_DELACK;
6339 tp->t_flags |= TF_ACKNOW;
6340 KMOD_TCPSTAT_INC(tcps_delack);
6341 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
6346 * Persists timer, here we simply send the
6347 * same thing as a keepalive will,
6348 * the one byte send.
6350 * We only return 1, saying don't proceed, if all timers
6351 * are stopped (destroyed PCB?).
6354 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6356 struct tcptemp *t_template;
6362 if (tp->t_timers->tt_flags & TT_STOPPED) {
6365 if (rack->rc_in_persist == 0)
6367 if (ctf_progress_timeout_check(tp, false)) {
6368 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
6369 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
6370 tcp_set_inp_to_drop(inp, ETIMEDOUT);
6373 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
6375 * Persistence timer into zero window. Force a byte to be output, if
6378 KMOD_TCPSTAT_INC(tcps_persisttimeo);
6380 * Hack: if the peer is dead/unreachable, we do not time out if the
6381 * window is closed. After a full backoff, drop the connection if
6382 * the idle time (no responses to probes) reaches the maximum
6383 * backoff that we would use if retransmitting.
6385 if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
6386 (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
6387 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) {
6388 KMOD_TCPSTAT_INC(tcps_persistdrop);
6390 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
6391 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
6394 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) &&
6395 tp->snd_una == tp->snd_max)
6396 rack_exit_persist(tp, rack, cts);
6397 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT;
6399 * If the user has closed the socket then drop a persisting
6400 * connection after a much reduced timeout.
6402 if (tp->t_state > TCPS_CLOSE_WAIT &&
6403 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
6405 KMOD_TCPSTAT_INC(tcps_persistdrop);
6406 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
6407 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
6410 t_template = tcpip_maketemplate(rack->rc_inp);
6412 /* only set it if we were answered */
6413 if (rack->forced_ack == 0) {
6414 rack->forced_ack = 1;
6415 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
6417 tcp_respond(tp, t_template->tt_ipgen,
6418 &t_template->tt_t, (struct mbuf *)NULL,
6419 tp->rcv_nxt, tp->snd_una - 1, 0);
6420 /* This sends an ack */
6421 if (tp->t_flags & TF_DELACK)
6422 tp->t_flags &= ~TF_DELACK;
6423 free(t_template, M_TEMP);
6425 if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
6428 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL);
6429 rack_start_hpts_timer(rack, tp, cts,
6435 * If a keepalive goes off, we had no other timers
6436 * happening. We always return 1 here since this
6437 * routine either drops the connection or sends
6438 * out a segment via tcp_respond().
6441 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6443 struct tcptemp *t_template;
6446 if (tp->t_timers->tt_flags & TT_STOPPED) {
6449 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
6451 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL);
6453 * Keep-alive timer went off; send something or drop connection if
6454 * idle for too long.
6456 KMOD_TCPSTAT_INC(tcps_keeptimeo);
6457 if (tp->t_state < TCPS_ESTABLISHED)
6459 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
6460 tp->t_state <= TCPS_CLOSING) {
6461 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
6464 * Send a packet designed to force a response if the peer is
6465 * up and reachable: either an ACK if the connection is
6466 * still alive, or an RST if the peer has closed the
6467 * connection due to timeout or reboot. Using sequence
6468 * number tp->snd_una-1 causes the transmitted zero-length
6469 * segment to lie outside the receive window; by the
6470 * protocol spec, this requires the correspondent TCP to
6473 KMOD_TCPSTAT_INC(tcps_keepprobe);
6474 t_template = tcpip_maketemplate(inp);
6476 if (rack->forced_ack == 0) {
6477 rack->forced_ack = 1;
6478 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
6480 tcp_respond(tp, t_template->tt_ipgen,
6481 &t_template->tt_t, (struct mbuf *)NULL,
6482 tp->rcv_nxt, tp->snd_una - 1, 0);
6483 free(t_template, M_TEMP);
6486 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
6489 KMOD_TCPSTAT_INC(tcps_keepdrops);
6490 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
6491 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
6496 * Retransmit helper function, clear up all the ack
6497 * flags and take care of important bookkeeping.
6500 rack_remxt_tmr(struct tcpcb *tp)
6503 * The retransmit timer went off, all sack'd blocks must be un-acked.
6506 struct rack_sendmap *rsm, *trsm = NULL;
6507 struct tcp_rack *rack;
6509 rack = (struct tcp_rack *)tp->t_fb_ptr;
6510 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__);
6511 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL);
6512 if (rack->r_state && (rack->r_state != tp->t_state))
6513 rack_set_state(tp, rack);
6515 * Ideally we would like to be able to
6516 * mark SACK-PASS on anything not acked here.
6518 * However, if we do that we would burst out
6519 * all that data 1ms apart. This would be unwise,
6520 * so for now we will just let the normal rxt timer
6521 * and tlp timer take care of it.
6523 * Also we really need to stick them back in sequence
6524 * order. This way we send in the proper order and any
6525 * sacks that come floating in will "re-ack" the data.
6526 * To do this we zap the tmap with an INIT and then
6527 * walk through and place every rsm in the RB tree
6528 * back in its seq ordered place.
6530 TAILQ_INIT(&rack->r_ctl.rc_tmap);
6531 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
6533 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
6534 /* We must re-add it back to the tlist */
6536 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
6538 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
6542 if (rsm->r_flags & RACK_ACKED)
6543 rsm->r_flags |= RACK_WAS_ACKED;
6544 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS);
6546 /* Clear the count (we just un-acked them) */
6547 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una;
6548 rack->r_ctl.rc_sacked = 0;
6549 rack->r_ctl.rc_sacklast = NULL;
6550 rack->r_ctl.rc_agg_delayed = 0;
6552 rack->r_ctl.rc_agg_early = 0;
6554 /* Clear the tlp rtx mark */
6555 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6556 if (rack->r_ctl.rc_resend != NULL)
6557 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT;
6558 rack->r_ctl.rc_prr_sndcnt = 0;
6559 rack_log_to_prr(rack, 6, 0);
6560 rack->r_timer_override = 1;
6561 if ((((tp->t_flags & TF_SACK_PERMIT) == 0)
6562 #ifdef NETFLIX_EXP_DETECTION
6563 || (rack->sack_attack_disable != 0)
6565 ) && ((tp->t_flags & TF_SENTFIN) == 0)) {
6567 * For non-sack customers new data
6568 * needs to go out as retransmits until
6569 * we retransmit up to snd_max.
6571 rack->r_must_retran = 1;
6572 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp,
6573 rack->r_ctl.rc_sacked);
6575 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
6579 rack_convert_rtts(struct tcpcb *tp)
6581 if (tp->t_srtt > 1) {
6584 val = tp->t_srtt >> TCP_RTT_SHIFT;
6585 frac = tp->t_srtt & 0x1f;
6586 tp->t_srtt = TICKS_2_USEC(val);
6588 * frac is the fractional part of the srtt (if any)
6589 * but it's in ticks and every bit represents 1/32nd of a tick.
6594 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE);
6596 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE));
6604 val = tp->t_rttvar >> TCP_RTTVAR_SHIFT;
6605 frac = tp->t_rttvar & 0x1f;
6606 tp->t_rttvar = TICKS_2_USEC(val);
6608 * frac is the fractional part of the rttvar (if any)
6609 * but it's in ticks and every bit represents 1/32nd of a tick.
6614 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE);
6616 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE));
6618 tp->t_rttvar += frac;
6621 tp->t_rxtcur = RACK_REXMTVAL(tp);
6622 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
6623 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop);
6625 if (tp->t_rxtcur > rack_rto_max) {
6626 tp->t_rxtcur = rack_rto_max;
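/*
 * Worked example of the conversion above (hz = 1000 assumed, so one
 * tick is one msec): a stored t_srtt of 330 in the ticks format
 * holds 330 >> 5 = 10 whole ticks (10000 usecs after TICKS_2_USEC)
 * plus a fraction of 330 & 0x1f = 10, worth
 * 10 * HPTS_USEC_IN_MSEC / 32 = 312 usecs, for a converted srtt of
 * 10312 usecs.
 */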
6631 rack_cc_conn_init(struct tcpcb *tp)
6633 struct tcp_rack *rack;
6636 rack = (struct tcp_rack *)tp->t_fb_ptr;
6640 * Now convert to rack's internal format, if required.
6643 if ((srtt == 0) && (tp->t_srtt != 0))
6644 rack_convert_rtts(tp);
6646 * We want a chance to stay in slowstart as
6647 * we create a connection. TCP spec says that
6648 * initially ssthresh is infinite. For our
6649 * purposes that is the snd_wnd.
6651 if (tp->snd_ssthresh < tp->snd_wnd) {
6652 tp->snd_ssthresh = tp->snd_wnd;
6655 * We also want to assure an IW's worth of
6656 * data can get in flight.
6658 if (rc_init_window(rack) < tp->snd_cwnd)
6659 tp->snd_cwnd = rc_init_window(rack);
6663 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
6664 * we will set up to retransmit the lowest seq number outstanding.
6667 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6675 if (tp->t_timers->tt_flags & TT_STOPPED) {
6678 if (ctf_progress_timeout_check(tp, false)) {
6679 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
6680 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
6681 tcp_set_inp_to_drop(inp, ETIMEDOUT);
6684 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT;
6685 rack->r_ctl.retran_during_recovery = 0;
6686 rack->r_ctl.dsack_byte_cnt = 0;
6687 if (IN_FASTRECOVERY(tp->t_flags))
6688 tp->t_flags |= TF_WASFRECOVERY;
6690 tp->t_flags &= ~TF_WASFRECOVERY;
6691 if (IN_CONGRECOVERY(tp->t_flags))
6692 tp->t_flags |= TF_WASCRECOVERY;
6694 tp->t_flags &= ~TF_WASCRECOVERY;
6695 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
6696 (tp->snd_una == tp->snd_max)) {
6697 /* Nothing outstanding .. nothing to do */
6701 * Rack can only run one timer at a time, so we cannot
6702 * run a KEEPINIT (gating SYN sending) and a retransmit
6703 * timer for the SYN. So if we are in a front state and
6704 * have a KEEPINIT timer we need to check the first transmit
6705 * against now to see if we have exceeded the KEEPINIT time
6708 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
6709 (TP_KEEPINIT(tp) != 0)) {
6710 struct rack_sendmap *rsm;
6712 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6714 /* Ok we have something outstanding to test keepinit with */
6715 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) &&
6716 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) {
6717 /* We have exceeded the KEEPINIT time */
6718 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
6724 * Retransmission timer went off. Message has not been acked within
6725 * retransmit interval. Back off to a longer retransmit interval
6726 * and retransmit one segment.
6729 if ((rack->r_ctl.rc_resend == NULL) ||
6730 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) {
6732 * If the rwnd collapsed on
6733 * the one we are retransmitting
6734 * it does not count against the retransmit count.
6739 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) {
6740 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
6742 tp->t_rxtshift = TCP_MAXRXTSHIFT;
6743 KMOD_TCPSTAT_INC(tcps_timeoutdrop);
6745 tcp_set_inp_to_drop(rack->rc_inp,
6746 (tp->t_softerror ? (uint16_t) tp->t_softerror : ETIMEDOUT));
6749 if (tp->t_state == TCPS_SYN_SENT) {
6751 * If the SYN was retransmitted, indicate CWND to be limited
6752 * to 1 segment in cc_conn_init().
6755 } else if (tp->t_rxtshift == 1) {
6757 * first retransmit; record ssthresh and cwnd so they can be
6758 * recovered if this turns out to be a "bad" retransmit. A
6759 * retransmit is considered "bad" if an ACK for this segment
6760 * is received within RTT/2 interval; the assumption here is
6761 * that the ACK was already in flight. See "On Estimating
6762 * End-to-End Network Path Properties" by Allman and Paxson
6765 tp->snd_cwnd_prev = tp->snd_cwnd;
6766 tp->snd_ssthresh_prev = tp->snd_ssthresh;
6767 tp->snd_recover_prev = tp->snd_recover;
6768 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2);
6769 tp->t_flags |= TF_PREVVALID;
6770 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0)
6771 tp->t_flags &= ~TF_PREVVALID;
6772 KMOD_TCPSTAT_INC(tcps_rexmttimeo);
6773 if ((tp->t_state == TCPS_SYN_SENT) ||
6774 (tp->t_state == TCPS_SYN_RECEIVED))
6775 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift];
6777 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift];
6779 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt,
6780 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop);
6782 * We enter the path for PLMTUD if the connection is established or, if
6783 * the connection is in FIN_WAIT_1; the reason for the latter is that if the
6784 * amount of data we send is very small, we could send it in a couple
6785 * of packets and proceed straight to FIN. In that case we won't
6786 * catch the ESTABLISHED state.
6789 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false;
6793 if (((V_tcp_pmtud_blackhole_detect == 1) ||
6794 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) ||
6795 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) &&
6796 ((tp->t_state == TCPS_ESTABLISHED) ||
6797 (tp->t_state == TCPS_FIN_WAIT_1))) {
6799 * Idea here is that each stage of the mtu probe (usually,
6800 * 1448 -> 1188 -> 524) should be given 2 chances to recover
6801 * before further clamping down. 'tp->t_rxtshift % 2 == 0'
6802 * should take care of that.
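/*
 * Under that staging (probe values illustrative): the clamp below
 * fires at t_rxtshift 2 and 4. The first hit records the current
 * t_maxseg and drops to the blackhole MSS; the second drops to the
 * default MSS and disables PMTUD; from shift 6 on (see further
 * below) the saved MSS is restored, on the theory that it was not
 * a blackhole after all.
 */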
6804 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) ==
6805 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) &&
6806 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
6807 tp->t_rxtshift % 2 == 0)) {
6809 * Enter Path MTU Black-hole Detection mechanism: -
6810 * Disable Path MTU Discovery (IP "DF" bit). -
6811 * Reduce MTU to lower value than what we negotiated
6814 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
6815 /* Record that we may have found a black hole. */
6816 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
6817 /* Keep track of previous MSS. */
6818 tp->t_pmtud_saved_maxseg = tp->t_maxseg;
6822 * Reduce the MSS to blackhole value or to the
6823 * default in an attempt to retransmit.
6827 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
6828 /* Use the sysctl tuneable blackhole MSS. */
6829 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
6830 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
6831 } else if (isipv6) {
6832 /* Use the default MSS. */
6833 tp->t_maxseg = V_tcp_v6mssdflt;
6835 * Disable Path MTU Discovery when we switch
6838 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
6839 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
6842 #if defined(INET6) && defined(INET)
6846 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
6847 /* Use the sysctl tuneable blackhole MSS. */
6848 tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
6849 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
6851 /* Use the default MSS. */
6852 tp->t_maxseg = V_tcp_mssdflt;
6854 * Disable Path MTU Discovery when we switch
6857 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
6858 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
6863 * If further retransmissions are still unsuccessful
6864 * with a lowered MTU, maybe this isn't a blackhole
6865 * and we restore the previous MSS and blackhole
6866 * detection flags. The limit '6' is determined by
6867 * giving each probe stage (1448, 1188, 524) 2
6868 * chances to recover.
6870 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
6871 (tp->t_rxtshift >= 6)) {
6872 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
6873 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
6874 tp->t_maxseg = tp->t_pmtud_saved_maxseg;
6875 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed);
6880 * Disable RFC1323 and SACK if we haven't got any response to
6881 * our third SYN to work around some broken terminal servers
6882 * (most of which have hopefully been retired) that have bad VJ
6883 * header compression code which trashes TCP segments containing
6884 * unknown-to-them TCP options.
6886 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
6887 (tp->t_rxtshift == 3))
6888 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
6890 * If we backed off this far, our srtt estimate is probably bogus.
6891 * Clobber it so we'll take the next rtt measurement as our srtt;
6892 * move the current srtt into rttvar to keep the current retransmit
6895 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
6897 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
6898 in6_losing(tp->t_inpcb);
6901 in_losing(tp->t_inpcb);
6902 tp->t_rttvar += tp->t_srtt;
6905 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
6906 tp->snd_recover = tp->snd_max;
6907 tp->t_flags |= TF_ACKNOW;
6909 rack_cong_signal(tp, CC_RTO, tp->snd_una);
6915 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling)
6918 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK);
6923 if (tp->t_state == TCPS_LISTEN) {
6924 /* no timers on listen sockets */
6925 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)
6929 if ((timers & PACE_TMR_RACK) &&
6930 rack->rc_on_min_to) {
6932 * For the rack timer when we
6933 * are on a min-timeout (which means rrr_conf = 3)
6934 * we don't want to check the timer. It may
6935 * be going off for a pace and that's ok; we
6936 * want to send the retransmit (if it's ready).
6938 * If it's on a normal rack timer (non-min) then
6939 * we will check if it's expired.
6941 goto skip_time_check;
6943 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
6946 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
6948 rack_log_to_processing(rack, cts, ret, 0);
6951 if (hpts_calling == 0) {
6953 * A user send or queued mbuf (sack) has called us? We
6954 * return 0 and let the pacing guards
6955 * deal with whether they should or
6956 * should not cause a send.
6959 rack_log_to_processing(rack, cts, ret, 0);
6963 * Ok our timer went off early and we are not paced; false
6964 * alarm, go back to sleep.
6967 left = rack->r_ctl.rc_timer_exp - cts;
6968 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left));
6969 rack_log_to_processing(rack, cts, ret, left);
6973 rack->rc_tmr_stopped = 0;
6974 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK;
6975 if (timers & PACE_TMR_DELACK) {
6976 ret = rack_timeout_delack(tp, rack, cts);
6977 } else if (timers & PACE_TMR_RACK) {
6978 rack->r_ctl.rc_tlp_rxt_last_time = cts;
6979 rack->r_fast_output = 0;
6980 ret = rack_timeout_rack(tp, rack, cts);
6981 } else if (timers & PACE_TMR_TLP) {
6982 rack->r_ctl.rc_tlp_rxt_last_time = cts;
6983 ret = rack_timeout_tlp(tp, rack, cts);
6984 } else if (timers & PACE_TMR_RXT) {
6985 rack->r_ctl.rc_tlp_rxt_last_time = cts;
6986 rack->r_fast_output = 0;
6987 ret = rack_timeout_rxt(tp, rack, cts);
6988 } else if (timers & PACE_TMR_PERSIT) {
6989 ret = rack_timeout_persist(tp, rack, cts);
6990 } else if (timers & PACE_TMR_KEEP) {
6991 ret = rack_timeout_keepalive(tp, rack, cts);
6993 rack_log_to_processing(rack, cts, ret, timers);
6998 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line)
7001 uint32_t us_cts, flags_on_entry;
7002 uint8_t hpts_removed = 0;
7004 flags_on_entry = rack->r_ctl.rc_hpts_flags;
7005 us_cts = tcp_get_usecs(&tv);
7006 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
7007 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) ||
7008 ((tp->snd_max - tp->snd_una) == 0))) {
7009 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
7011 /* If we were not delayed cancel out the flag. */
7012 if ((tp->snd_max - tp->snd_una) == 0)
7013 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
7014 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
7016 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
7017 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
7018 if (rack->rc_inp->inp_in_hpts &&
7019 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) {
7021 * Canceling timers when we have no output being
7022 * paced. We also must remove ourselves from the
7025 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
7028 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK);
7030 if (hpts_removed == 0)
7031 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
7035 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type)
7041 rack_stopall(struct tcpcb *tp)
7043 struct tcp_rack *rack;
7044 rack = (struct tcp_rack *)tp->t_fb_ptr;
7045 rack->t_timers_stopped = 1;
7050 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta)
7056 rack_timer_active(struct tcpcb *tp, uint32_t timer_type)
7062 rack_stop_all_timers(struct tcpcb *tp)
7064 struct tcp_rack *rack;
7067 * Ensure no timers are running.
7069 if (tcp_timer_active(tp, TT_PERSIST)) {
7070 /* We enter in persists, set the flag appropriately */
7071 rack = (struct tcp_rack *)tp->t_fb_ptr;
7072 rack->rc_in_persist = 1;
7074 tcp_timer_suspend(tp, TT_PERSIST);
7075 tcp_timer_suspend(tp, TT_REXMT);
7076 tcp_timer_suspend(tp, TT_KEEP);
7077 tcp_timer_suspend(tp, TT_DELACK);
7081 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
7082 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag)
7085 uint16_t stripped_flags;
7088 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
7090 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
7091 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
7092 rsm->r_flags |= RACK_OVERMAX;
7094 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) {
7095 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
7096 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
7098 idx = rsm->r_rtr_cnt - 1;
7099 rsm->r_tim_lastsent[idx] = ts;
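/*
 * Note: r_tim_lastsent[] keeps one send timestamp per
 * (re)transmission of this block, so the entry written above at
 * r_rtr_cnt - 1 is always the most recent transmit time.
 */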
7100 stripped_flags = rsm->r_flags & ~(RACK_SENT_SP|RACK_SENT_FP);
7101 if (rsm->r_flags & RACK_ACKED) {
7102 /* Probably MTU discovery messing with us */
7103 rsm->r_flags &= ~RACK_ACKED;
7104 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
7106 if (rsm->r_in_tmap) {
7107 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7110 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7112 if (rsm->r_flags & RACK_SACK_PASSED) {
7113 /* We have retransmitted due to the SACK pass */
7114 rsm->r_flags &= ~RACK_SACK_PASSED;
7115 rsm->r_flags |= RACK_WAS_SACKPASS;
7120 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
7121 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag)
7124 * We (re-)transmitted starting at rsm->r_start for some length
7125 * (possibly less than r_end).
7127 struct rack_sendmap *nrsm, *insret;
7132 c_end = rsm->r_start + len;
7133 if (SEQ_GEQ(c_end, rsm->r_end)) {
7135 * We retransmitted the whole piece or more than the whole
7136 * slopping into the next rsm.
7138 rack_update_rsm(tp, rack, rsm, ts, add_flag);
7139 if (c_end == rsm->r_end) {
7145 /* Hangs over the end, return what's left */
7146 act_len = rsm->r_end - rsm->r_start;
7147 *lenp = (len - act_len);
7148 return (rsm->r_end);
7150 /* We don't get out of this block. */
7153 * Here we retransmitted less than the whole thing which means we
7154 * have to split this into what was transmitted and what was not.
7156 nrsm = rack_alloc_full_limit(rack);
7159 * We can't get memory, so let's not proceed.
7165 * So here we are going to take the original rsm and make it what we
7166 * retransmitted. nrsm will be the tail portion we did not
7167 * retransmit. For example say the chunk was 1, 11 (10 bytes). And
7168 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to
7169 * 1, 6 and the new piece will be 6, 11.
7171 rack_clone_rsm(rack, nrsm, rsm, c_end);
7173 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
7174 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7176 if (insret != NULL) {
7177 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7178 nrsm, insret, rack, rsm);
7181 if (rsm->r_in_tmap) {
7182 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
7183 nrsm->r_in_tmap = 1;
7185 rsm->r_flags &= (~RACK_HAS_FIN);
7186 rack_update_rsm(tp, rack, rsm, ts, add_flag);
7187 /* Log a split of rsm into rsm and nrsm */
7188 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
7194 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
7195 uint32_t seq_out, uint8_t th_flags, int32_t err, uint64_t cts,
7196 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, uint32_t s_moff, int hw_tls)
7198 struct tcp_rack *rack;
7199 struct rack_sendmap *rsm, *nrsm, *insret, fe;
7200 register uint32_t snd_max, snd_una;
7203 * Add to the RACK log of packets in flight or retransmitted. If
7204 * there is a TS option we will use the TS echoed, if not we will
7207 * Retransmissions will increment the count and move the ts to its
7208 * proper place. Note that if options do not include TS's then we
7209 * won't be able to effectively use the ACK for an RTT on a retransmission.
7211 * Notes about r_start and r_end. Lets consider a send starting at
7212 * sequence 1 for 10 bytes. In such an example the r_start would be
7213 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
7214 * This means that r_end is actually the first sequence for the next
7219 * If err is set what do we do XXXrrs? should we not add the thing?
7220 * -- i.e. return if err != 0 or should we pretend we sent it? --
7221 * i.e. proceed with add ** do this for now.
7223 INP_WLOCK_ASSERT(tp->t_inpcb);
7226 * We don't log errors -- we could but snd_max does not
7227 * advance in this case either.
7231 if (th_flags & TH_RST) {
7233 * We don't log resets and we return immediately from
7238 rack = (struct tcp_rack *)tp->t_fb_ptr;
7239 snd_una = tp->snd_una;
7240 snd_max = tp->snd_max;
7241 if (th_flags & (TH_SYN | TH_FIN)) {
7243 * The call to rack_log_output is made before bumping
7244 * snd_max. This means we can record one extra byte on a SYN
7245 * or FIN if seq_out is adding more on and a FIN is present
7246 * (and we are not resending).
7248 if ((th_flags & TH_SYN) && (seq_out == tp->iss))
7250 if (th_flags & TH_FIN)
7252 if (SEQ_LT(snd_max, tp->snd_nxt)) {
7254 * The add/update has not been done for the FIN/SYN
7257 snd_max = tp->snd_nxt;
7260 if (SEQ_LEQ((seq_out + len), snd_una)) {
7261 /* Are we sending an old segment to induce an ack (keep-alive)? */
7264 if (SEQ_LT(seq_out, snd_una)) {
7265 /* huh? should we panic? */
7268 end = seq_out + len;
7270 if (SEQ_GEQ(end, seq_out))
7271 len = end - seq_out;
7276 /* We don't log zero window probes */
7279 rack->r_ctl.rc_time_last_sent = cts;
7280 if (IN_FASTRECOVERY(tp->t_flags)) {
7281 rack->r_ctl.rc_prr_out += len;
7283 /* First question: is it a retransmission or new? */
7284 if (seq_out == snd_max) {
7287 rsm = rack_alloc(rack);
7290 * Hmm out of memory and the tcb got destroyed while
7295 if (th_flags & TH_FIN) {
7296 rsm->r_flags = RACK_HAS_FIN|add_flag;
7298 rsm->r_flags = add_flag;
7302 rsm->r_tim_lastsent[0] = cts;
7304 rsm->r_rtr_bytes = 0;
7305 if (th_flags & TH_SYN) {
7306 /* The data space is one beyond snd_una */
7307 rsm->r_flags |= RACK_HAS_SYN;
7309 rsm->r_start = seq_out;
7310 rsm->r_end = rsm->r_start + len;
7313 * save off the mbuf location that
7314 * sndmbuf_noadv returned (which is
7315 * where we started copying from).
7319 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */
7321 if (rsm->m->m_len <= rsm->soff) {
7323 * XXXrrs Question: will this happen?
7325 * If sbsndptr is set at the correct place
7326 * then s_moff should always be somewhere
7327 * within rsm->m. But if the sbsndptr was
7328 * off then that won't be true. If it occurs
7329 * we need to walk out to the correct location.
7334 while (lm->m_len <= rsm->soff) {
7335 rsm->soff -= lm->m_len;
7337 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u",
7338 __func__, rack, s_moff, s_mb, rsm->soff));
7341 counter_u64_add(rack_sbsndptr_wrong, 1);
7343 counter_u64_add(rack_sbsndptr_right, 1);
7344 rsm->orig_m_len = rsm->m->m_len;
7346 rsm->orig_m_len = 0;
7347 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
7349 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__);
7350 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7352 if (insret != NULL) {
7353 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7354 nrsm, insret, rack, rsm);
7357 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7360 * Special-case detection: is there just a single
7361 * packet outstanding when we are not in recovery?
7363 * If this is true, mark it so.
7365 if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
7366 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) {
7367 struct rack_sendmap *prsm;
7369 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7371 prsm->r_one_out_nr = 1;
7376 * If we reach here it's a retransmission and we need to find it.
7378 memset(&fe, 0, sizeof(fe));
7380 if (hintrsm && (hintrsm->r_start == seq_out)) {
7384 /* No hints sorry */
7387 if ((rsm) && (rsm->r_start == seq_out)) {
7388 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag);
7395 /* Ok, it was not the last pointer; go through it the hard way. */
7397 fe.r_start = seq_out;
7398 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
7400 if (rsm->r_start == seq_out) {
7401 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag);
7408 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) {
7409 /* Transmitted within this piece */
7411 * Ok we must split off the front and then let the
7412 * update do the rest
7414 nrsm = rack_alloc_full_limit(rack);
7416 rack_update_rsm(tp, rack, rsm, cts, add_flag);
7420 * copy rsm to nrsm and then trim the front of rsm
7421 * to not include this part.
7423 rack_clone_rsm(rack, nrsm, rsm, seq_out);
7424 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7425 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
7427 if (insret != NULL) {
7428 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7429 nrsm, insret, rack, rsm);
7432 if (rsm->r_in_tmap) {
7433 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
7434 nrsm->r_in_tmap = 1;
7436 rsm->r_flags &= (~RACK_HAS_FIN);
7437 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag);
7445 * Hmm, not found in map; did they retransmit both old and on into the
7448 if (seq_out == tp->snd_max) {
7450 } else if (SEQ_LT(seq_out, tp->snd_max)) {
7452 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n",
7453 seq_out, len, tp->snd_una, tp->snd_max);
7454 printf("Starting Dump of all rack entries\n");
7455 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
7456 printf("rsm:%p start:%u end:%u\n",
7457 rsm, rsm->r_start, rsm->r_end);
7459 printf("Dump complete\n");
7460 panic("seq_out not found rack:%p tp:%p",
7466 * Hmm beyond sndmax? (only if we are using the new rtt-pack
7469 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p",
7470 seq_out, len, tp->snd_max, tp);
7476 * Record one of the RTT updates from an ack into
7477 * our sample structure.
7481 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt,
7482 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt)
7484 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7485 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
7486 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
7488 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7489 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
7490 rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
7492 if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
7493 if (us_rtt < rack->r_ctl.rc_gp_lowrtt)
7494 rack->r_ctl.rc_gp_lowrtt = us_rtt;
7495 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd)
7496 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
7498 if ((confidence == 1) &&
7500 (rsm->r_just_ret) ||
7501 (rsm->r_one_out_nr &&
7502 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) {
7504 * If the rsm had a just-return
7505 * hit on it then we can't trust the
7506 * rtt measurement for buffer determination.
7507 * Note that a confidence of 2 indicates
7508 * SACK'd, which overrides the r_just_ret or
7509 * the r_one_out_nr. If it was a CUM-ACK and
7510 * we had only two outstanding, but get an
7511 * ack for only 1, then that also lowers our
7516 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7517 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) {
7518 if (rack->r_ctl.rack_rs.confidence == 0) {
7520 * We take anything with no current confidence
7523 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
7524 rack->r_ctl.rack_rs.confidence = confidence;
7525 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
7526 } else if (confidence || rack->r_ctl.rack_rs.confidence) {
7528 * Once we have a confident number,
7529 * we can update it with a smaller
7530 * value since this confident number
7531 * may include the DSACK time until
7532 * the next segment (the second one) arrived.
7534 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
7535 rack->r_ctl.rack_rs.confidence = confidence;
7536 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
7539 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
7540 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
7541 rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
7542 rack->r_ctl.rack_rs.rs_rtt_cnt++;
7546 * Collect new round-trip time estimate
7547 * and update averages and current timeout.
7550 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp)
7553 uint32_t o_srtt, o_var;
7554 int32_t hrtt_up = 0;
7557 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY)
7558 /* No valid sample */
7560 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) {
7561 /* We are to use the lowest RTT seen in a single ack */
7562 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
7563 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) {
7564 /* We are to use the highest RTT seen in a single ack */
7565 rtt = rack->r_ctl.rack_rs.rs_rtt_highest;
7566 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) {
7567 /* We are to use the average RTT seen in a single ack */
7568 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot /
7569 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt);
7572 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method);
7578 if (rack->rc_gp_rtt_set == 0) {
7580 * With no RTT we have to accept
7581 * even one we are not confident of.
7583 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt;
7584 rack->rc_gp_rtt_set = 1;
7585 } else if (rack->r_ctl.rack_rs.confidence) {
7586 /* update the running gp srtt */
7587 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8);
7588 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8;
7590 if (rack->r_ctl.rack_rs.confidence) {
7592 * record the low and high for highly buffered path computation;
7593 * we only do this if we are confident (not a retransmission).
7595 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) {
7596 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
7599 if (rack->rc_highly_buffered == 0) {
7601 * Currently once we declare a path as
7602 * highly buffered there is no going
7603 * back, which may be a problem...
7605 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) {
7606 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt,
7607 rack->r_ctl.rc_highest_us_rtt,
7608 rack->r_ctl.rc_lowest_us_rtt,
7610 rack->rc_highly_buffered = 1;
7614 if ((rack->r_ctl.rack_rs.confidence) ||
7615 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) {
7617 * If we are highly confident of it <or> it was
7618 * never retransmitted we accept it as the last us_rtt.
7620 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
7621 /* The lowest rtt can be set if it was not retransmitted */
7622 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) {
7623 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
7624 if (rack->r_ctl.rc_lowest_us_rtt == 0)
7625 rack->r_ctl.rc_lowest_us_rtt = 1;
7628 o_srtt = tp->t_srtt;
7629 o_var = tp->t_rttvar;
7630 rack = (struct tcp_rack *)tp->t_fb_ptr;
7631 if (tp->t_srtt != 0) {
7633 * We keep a simple srtt in microseconds, like our rtt
7634 * measurement. We don't need to do any tricks with shifting
7635 * etc. Instead we just add in 1/8th of the new measurement
7636 * and subtract out 1/8 of the old srtt. We do the same with
7637 * the variance after finding the absolute value of the
7638 * difference between this sample and the current srtt.
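 * For example (hypothetical numbers): with t_srtt = 40000 usec and
 * a new sample rtt = 48000 usec, srtt becomes 40000 - 5000 + 6000 =
 * 41000 usec, while |delta| = 8000 usec feeds the rttvar average in
 * the same way.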
7640 delta = tp->t_srtt - rtt;
7641 /* Take off 1/8th of the current sRTT */
7642 tp->t_srtt -= (tp->t_srtt >> 3);
7643 /* Add in 1/8th of the new RTT just measured */
7644 tp->t_srtt += (rtt >> 3);
7645 if (tp->t_srtt <= 0)
7647 /* Now let's take the absolute value of the difference */
7650 /* Subtract out 1/8th */
7651 tp->t_rttvar -= (tp->t_rttvar >> 3);
7652 /* Add in 1/8th of the new variance we just saw */
7653 tp->t_rttvar += (delta >> 3);
7654 if (tp->t_rttvar <= 0)
7656 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
7657 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
7660 * No rtt measurement yet - use the unsmoothed rtt. Set the
7661 * variance to half the rtt (so our first retransmit happens
7665 tp->t_rttvar = rtt >> 1;
7666 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
7668 rack->rc_srtt_measure_made = 1;
7669 KMOD_TCPSTAT_INC(tcps_rttupdated);
7672 if (rack_stats_gets_ms_rtt == 0) {
7673 /* Send in the microsecond rtt used for rxt timeout purposes */
7674 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt));
7675 } else if (rack_stats_gets_ms_rtt == 1) {
7676 /* Send in the millisecond rtt used for rxt timeout purposes */
7680 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
7681 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
7682 } else if (rack_stats_gets_ms_rtt == 2) {
7683 /* Send in the millisecond rtt as close to the path RTT as we can get */
7687 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
7688 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
7690 /* Send in the microsecond rtt as close to the path RTT as we can get */
7691 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt));
7696 * the retransmit should happen at rtt + 4 * rttvar. Because of the
7697 * way we do the smoothing, srtt and rttvar will each average +1/2
7698 * tick of bias. When we compute the retransmit timer, we want 1/2
7699 * tick of rounding and 1 extra tick because of +-1/2 tick
7700 * uncertainty in the firing of the timer. The bias will give us
7701 * exactly the 1.5 tick we need. But, because the bias is
7702 * statistical, we have to test that we don't drop below the minimum
7703 * feasible timer (which is 2 ticks).
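 * For example (hypothetical numbers): with srtt = 40 ms and
 * rttvar = 5 ms the timer is armed at roughly 40 + 4 * 5 = 60 ms,
 * clamped to the [rack_rto_min, rack_rto_max] range and padded by
 * the configured timer_slop.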
7706 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
7707 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop);
7708 rack_log_rtt_sample(rack, rtt);
7709 tp->t_softerror = 0;
7714 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts)
7717 * Apply the inbound us-rtt to the filter at time us_cts.
7721 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
7722 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt,
7724 if (rack->r_ctl.last_pacing_time &&
7725 rack->rc_gp_dyn_mul &&
7726 (rack->r_ctl.last_pacing_time > us_rtt))
7727 rack->pacing_longer_than_rtt = 1;
7729 rack->pacing_longer_than_rtt = 0;
7730 if (old_rtt > us_rtt) {
7731 /* We just hit a new lower rtt time */
7732 rack_log_rtt_shrinks(rack, us_cts, old_rtt,
7733 __LINE__, RACK_RTTS_NEWRTT);
7735 * Only count it if it's lower than what we saw within our
7738 if ((old_rtt - us_rtt) > rack_min_rtt_movement) {
7739 if (rack_probertt_lower_within &&
7740 rack->rc_gp_dyn_mul &&
7741 (rack->use_fixed_rate == 0) &&
7742 (rack->rc_always_pace)) {
7744 * We are seeing a new lower rtt very close
7745 * to the time that we would have entered probe-rtt.
7746 * This is probably due to the fact that a peer flow
7747 * has entered probe-rtt. Let's go in now too.
7751 val = rack_probertt_lower_within * rack_time_between_probertt;
7753 if ((rack->in_probe_rtt == 0) &&
7754 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) {
7755 rack_enter_probertt(rack, us_cts);
7758 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
7764 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
7765 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack)
7768 uint32_t t, len_acked;
7770 if ((rsm->r_flags & RACK_ACKED) ||
7771 (rsm->r_flags & RACK_WAS_ACKED))
7774 if (rsm->r_no_rtt_allowed) {
7778 if (ack_type == CUM_ACKED) {
7779 if (SEQ_GT(th_ack, rsm->r_end)) {
7780 len_acked = rsm->r_end - rsm->r_start;
7783 len_acked = th_ack - rsm->r_start;
7787 len_acked = rsm->r_end - rsm->r_start;
7790 if (rsm->r_rtr_cnt == 1) {
7793 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
7796 if (!tp->t_rttlow || tp->t_rttlow > t)
7798 if (!rack->r_ctl.rc_rack_min_rtt ||
7799 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7800 rack->r_ctl.rc_rack_min_rtt = t;
7801 if (rack->r_ctl.rc_rack_min_rtt == 0) {
7802 rack->r_ctl.rc_rack_min_rtt = 1;
7805 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]))
7806 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
7808 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
7811 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
7812 if (ack_type == SACKED) {
7813 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1);
7814 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt);
7817 * We need to set up what our confidence
7820 * If the rsm was app limited and it is
7821 * less than a mss in length (the end
7822 * of the send) then we have a gap. If we
7823 * were app limited but say we were sending
7824 * multiple MSS's then we are more confident
7827 * When we are not app-limited then we see if
7828 * the rsm is being included in the current
7829 * measurement, we tell this by the app_limited_needs_set
7832 * Note that being cwnd blocked is not app-limited,
7833 * and the pacing delay between packets which
7834 * are sending only 1 or 2 MSS's will also show up
7835 * in the RTT. We probably need to examine this algorithm
7836 * a bit more and enhance it to account for the delay
7837 * between rsm's. We could do that by saving off the
7838 * pacing delay of each rsm (in an rsm) and then
7839 * factoring that in somehow though for now I am
7844 if (rsm->r_flags & RACK_APP_LIMITED) {
7845 if (all && (len_acked <= ctf_fixed_maxseg(tp)))
7849 } else if (rack->app_limited_needs_set == 0) {
7854 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2);
7855 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt,
7856 calc_conf, rsm, rsm->r_rtr_cnt);
7858 if ((rsm->r_flags & RACK_TLP) &&
7859 (!IN_FASTRECOVERY(tp->t_flags))) {
7860 /* Segment was a TLP and our retrans matched */
7861 if (rack->r_ctl.rc_tlp_cwnd_reduce) {
7862 rack->r_ctl.rc_rsm_start = tp->snd_max;
7863 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
7864 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
7865 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
7868 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
7869 /* New more recent rack_tmit_time */
7870 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
7871 rack->rc_rack_rtt = t;
7876 * We clear the soft/rxtshift since we got an ack.
7877 * There is no assurance we will call the commit() function
7878 * so we need to clear these to avoid incorrect handling.
7881 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
7882 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
7883 tp->t_softerror = 0;
7884 if (to && (to->to_flags & TOF_TS) &&
7885 (ack_type == CUM_ACKED) &&
7887 ((rsm->r_flags & RACK_OVERMAX) == 0)) {
7889 * Now which timestamp does it match? In this block the ACK
7890 * must be coming from a previous transmission.
7892 for (i = 0; i < rsm->r_rtr_cnt; i++) {
7893 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) {
7894 t = cts - (uint32_t)rsm->r_tim_lastsent[i];
7897 if ((i + 1) < rsm->r_rtr_cnt) {
7899 * The peer ack'd from our previous
7900 * transmission. We have a spurious
7901 * retransmission and thus we don't
7902 * want to update our rack_rtt.
7906 if (!tp->t_rttlow || tp->t_rttlow > t)
7908 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7909 rack->r_ctl.rc_rack_min_rtt = t;
7910 if (rack->r_ctl.rc_rack_min_rtt == 0) {
7911 rack->r_ctl.rc_rack_min_rtt = 1;
7914 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
7915 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
7916 /* New more recent rack_tmit_time */
7917 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
7918 rack->rc_rack_rtt = t;
7920 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3);
7921 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm,
7929 * Ok, it's a SACK block that we retransmitted, or a Windows
7930 * machine without timestamps. We can tell nothing from the
7931 * time-stamp since it's not there, or from the time the peer last
7932 * received a segment that moved forward its cum-ack point.
7935 i = rsm->r_rtr_cnt - 1;
7936 t = cts - (uint32_t)rsm->r_tim_lastsent[i];
7939 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7941 * We retransmitted and the ack came back in less
7942 * than the smallest rtt we have observed. We most
7943 * likely did an improper retransmit as outlined in
7944 * 6.2 Step 2 point 2 in the rack-draft so we
7945 * don't want to update our rack_rtt. In
7946 * theory (in the future) we might want to think about reverting our
7947 * cwnd state but we won't for now.
7950 } else if (rack->r_ctl.rc_rack_min_rtt) {
7952 * We retransmitted it and the retransmit did the
7955 if (!rack->r_ctl.rc_rack_min_rtt ||
7956 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7957 rack->r_ctl.rc_rack_min_rtt = t;
7958 if (rack->r_ctl.rc_rack_min_rtt == 0) {
7959 rack->r_ctl.rc_rack_min_rtt = 1;
7962 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[i])) {
7963 /* New more recent rack_tmit_time */
7964 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i];
7965 rack->rc_rack_rtt = t;
7974 * Mark the SACK_PASSED flag on all entries prior to rsm, send-wise.
7977 rack_log_sack_passed(struct tcpcb *tp,
7978 struct tcp_rack *rack, struct rack_sendmap *rsm)
7980 struct rack_sendmap *nrsm;
7983 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap,
7984 rack_head, r_tnext) {
7986 /* Skip the original segment; it is acked */
7989 if (nrsm->r_flags & RACK_ACKED) {
7991 * Skip ack'd segments, though we
7992 * should not see these, since tmap
7993 * should not have ack'd segments.
7997 if (nrsm->r_flags & RACK_SACK_PASSED) {
7999 * We found one that is already marked
8000 * passed; we have been here before, and
8001 * so all others below this are marked.
8005 nrsm->r_flags |= RACK_SACK_PASSED;
8006 nrsm->r_flags &= ~RACK_WAS_SACKPASS;
8011 rack_need_set_test(struct tcpcb *tp,
8012 struct tcp_rack *rack,
8013 struct rack_sendmap *rsm,
8019 if ((tp->t_flags & TF_GPUTINPROG) &&
8020 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
8022 * We were app limited, and this ack
8023 * butts up or goes beyond the point where we want
8024 * to start our next measurement. We need
8025 * to record the new gput_ts here and
8026 * possibly update the start sequence.
8030 if (rsm->r_rtr_cnt > 1) {
8032 * This is a retransmit; can we
8033 * really make any assessment at this
8034 * point? We are not really sure of
8035 * the timestamp: is it this or the
8036 * previous transmission?
8038 * Let's wait for something better that
8039 * is not retransmitted.
8045 rack->app_limited_needs_set = 0;
8046 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
8047 /* Do we start at a new end? */
8048 if ((use_which == RACK_USE_BEG) &&
8049 SEQ_GEQ(rsm->r_start, tp->gput_seq)) {
8051 * When we get an ACK that just eats
8052 * up some of the rsm, we set RACK_USE_BEG
8053 * since what's at r_start (i.e. th_ack)
8054 * is left unacked and that's where the
8055 * measurement now starts.
8057 tp->gput_seq = rsm->r_start;
8058 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8060 if ((use_which == RACK_USE_END) &&
8061 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
8063 * We use the end when the cumack
8064 * is moving forward and completely
8065 * deleting the rsm passed, so basically
8066 * r_end holds th_ack.
8068 * For SACK's we also want to use the end
8069 * since this piece just got sacked and
8070 * we want to target anything after that
8071 * in our measurement.
8073 tp->gput_seq = rsm->r_end;
8074 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8076 if (use_which == RACK_USE_END_OR_THACK) {
8078 * Special case for the ack moving forward,
8079 * not a sack; we need to move all the
8080 * way up to where this ack's cum-ack moves
8083 if (SEQ_GT(th_ack, rsm->r_end))
8084 tp->gput_seq = th_ack;
8086 tp->gput_seq = rsm->r_end;
8087 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8089 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) {
8091 * We moved beyond this guy's range, re-calculate
8092 * the new end point.
8094 if (rack->rc_gp_filled == 0) {
8095 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
8097 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
8101 * We are moving the goal post; we may be able to clear the
8102 * measure_saw_probe_rtt flag.
8104 if ((rack->in_probe_rtt == 0) &&
8105 (rack->measure_saw_probe_rtt) &&
8106 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
8107 rack->measure_saw_probe_rtt = 0;
8108 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts,
8109 seq, tp->gput_seq, 0, 5, line, NULL);
8110 if (rack->rc_gp_filled &&
8111 ((tp->gput_ack - tp->gput_seq) <
8112 max(rc_init_window(rack), (MIN_GP_WIN *
8113 ctf_fixed_maxseg(tp))))) {
8114 uint32_t ideal_amount;
8116 ideal_amount = rack_get_measure_window(tp, rack);
8117 if (ideal_amount > sbavail(&tp->t_inpcb->inp_socket->so_snd)) {
8119 * There is no sense in continuing this measurement
8120 * because it's too small to gain us anything we
8121 * trust. Skip it, and that way we can start a new
8122 * measurement quicker.
8124 tp->t_flags &= ~TF_GPUTINPROG;
8125 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq,
8126 0, 0, 0, 6, __LINE__, NULL);
8129 * Reset the window further out.
8131 tp->gput_ack = tp->gput_seq + ideal_amount;
8138 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack,
8139 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two)
8141 uint32_t start, end, changed = 0;
8142 struct rack_sendmap stack_map;
8143 struct rack_sendmap *rsm, *nrsm, fe, *insret, *prev, *next;
8144 int32_t used_ref = 1;
8147 start = sack->start;
8150 memset(&fe, 0, sizeof(fe));
8152 if ((rsm == NULL) ||
8153 (SEQ_LT(end, rsm->r_start)) ||
8154 (SEQ_GEQ(start, rsm->r_end)) ||
8155 (SEQ_LT(start, rsm->r_start))) {
8157 * We are not in the right spot;
8158 * find the correct spot in the tree.
8162 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
8169 /* Ok we have an ACK for some piece of this rsm */
8170 if (rsm->r_start != start) {
8171 if ((rsm->r_flags & RACK_ACKED) == 0) {
8173 * Need to split this into two pieces: the before and after.
8174 * The before remains in the map; the after must be
8175 * added. In other words we have:
8176 * rsm |--------------|
8180 * and nrsm will be the sacked piece
8183 * But before we start down that path let's
8184 * see if the sack spans over on top of
8185 * the next guy and it is already sacked.
8187 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8188 if (next && (next->r_flags & RACK_ACKED) &&
8189 SEQ_GEQ(end, next->r_start)) {
8191 * So the next one is already acked, and
8192 * we can thus by hookery use our stack_map
8193 * to reflect the piece being sacked and
8194 * then adjust the two tree entries moving
8195 * the start and ends around. So we start like:
8196 * rsm |------------| (not-acked)
8197 * next |-----------| (acked)
8198 * sackblk |-------->
8199 * We want to end like so:
8200 * rsm |------| (not-acked)
8201 * next |-----------------| (acked)
8203 * Where nrsm is a temporary stack piece we
8204 * use to update all the gizmos.
8206 /* Copy up our fudge block */
8208 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
8209 /* Now adjust our tree blocks */
8211 next->r_start = start;
8212 /* Now we must adjust back where next->m is */
8213 rack_setup_offset_for_rsm(rsm, next);
8215 /* We don't need to adjust rsm, it did not change */
8216 /* Clear out the dup ack count of the remainder */
8218 rsm->r_just_ret = 0;
8219 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8220 /* Now let's make sure our fudge block is right */
8221 nrsm->r_start = start;
8222 /* Now let's update all the stats and such */
8223 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
8224 if (rack->app_limited_needs_set)
8225 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
8226 changed += (nrsm->r_end - nrsm->r_start);
8227 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
8228 if (nrsm->r_flags & RACK_SACK_PASSED) {
8229 counter_u64_add(rack_reorder_seen, 1);
8230 rack->r_ctl.rc_reorder_ts = cts;
8233 * Now we want to go up from rsm (the
8234 * one left un-acked) to the next one
8235 * in the tmap. We do this so when
8236 * we walk backwards we include marking
8237 * sack-passed on rsm (the one passed in
8238 * is skipped since it is generally called
8239 * on something sacked before removing it
8242 if (rsm->r_in_tmap) {
8243 nrsm = TAILQ_NEXT(rsm, r_tnext);
8245 * Now that we have the next
8246 * one, walk backwards from there.
8248 if (nrsm && nrsm->r_in_tmap)
8249 rack_log_sack_passed(tp, rack, nrsm);
8251 /* Now are we done? */
8252 if (SEQ_LT(end, next->r_end) ||
8253 (end == next->r_end)) {
8254 /* Done with block */
8257 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__);
8258 counter_u64_add(rack_sack_used_next_merge, 1);
8259 /* Position for the next block */
8260 start = next->r_end;
8261 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next);
8266 * We can't use any hookery here, so we
8267 * need to split the map. We enter like
8271 * We will add the new block nrsm and
8272 * that will be the new portion, and then
8273 * fall through after resetting rsm. So we
8274 * split and look like this:
8278 * We then fall through resetting
8279 * rsm to nrsm, so the next block
8282 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
8285 * failed XXXrrs what can we do but lose the sack
8290 counter_u64_add(rack_sack_splits, 1);
8291 rack_clone_rsm(rack, nrsm, rsm, start);
8292 rsm->r_just_ret = 0;
8293 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
8295 if (insret != NULL) {
8296 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
8297 nrsm, insret, rack, rsm);
8300 if (rsm->r_in_tmap) {
8301 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8302 nrsm->r_in_tmap = 1;
8304 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__);
8305 rsm->r_flags &= (~RACK_HAS_FIN);
8306 /* Position us to point to the new nrsm that starts the sack blk */
8310 /* Already sacked this piece */
8311 counter_u64_add(rack_sack_skipped_acked, 1);
8313 if (end == rsm->r_end) {
8314 /* Done with block */
8315 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8317 } else if (SEQ_LT(end, rsm->r_end)) {
8318 /* A partial sack to an already sacked block */
8320 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8324 * The end goes beyond this guy
8325 * reposition the start to the
8329 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8335 if (SEQ_GEQ(end, rsm->r_end)) {
8337 * The end of this block is either beyond this guy or right
8338 * at this guy. I.e.:
8344 if ((rsm->r_flags & RACK_ACKED) == 0) {
8345 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
8346 changed += (rsm->r_end - rsm->r_start);
8347 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
8348 if (rsm->r_in_tmap) /* should be true */
8349 rack_log_sack_passed(tp, rack, rsm);
8350 /* Is reordering occurring? */
8351 if (rsm->r_flags & RACK_SACK_PASSED) {
8352 rsm->r_flags &= ~RACK_SACK_PASSED;
8353 counter_u64_add(rack_reorder_seen, 1);
8354 rack->r_ctl.rc_reorder_ts = cts;
8356 if (rack->app_limited_needs_set)
8357 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
8358 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
8359 rsm->r_flags |= RACK_ACKED;
8360 rsm->r_flags &= ~RACK_TLP;
8361 if (rsm->r_in_tmap) {
8362 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8365 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__);
8367 counter_u64_add(rack_sack_skipped_acked, 1);
8370 if (end == rsm->r_end) {
8371 /* This block only - done, set up for next */
8375 * There is more not covered by this rsm; move on
8376 * to the next block in the RB tree.
8378 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8386 * The end of this sack block is smaller than
8391 if ((rsm->r_flags & RACK_ACKED) == 0) {
8392 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8393 if (prev && (prev->r_flags & RACK_ACKED)) {
8395 * Goal: we want the right remainder of rsm to shrink
8396 * in place and span from (rsm->r_start = end) to rsm->r_end.
8397 * We want to expand prev to go all the way
8398 * to prev->r_end <- end.
8399 * so in the tree we have before:
8400 * prev |--------| (acked)
8401 * rsm |-------| (non-acked)
8403 * We churn it so we end up with
8404 * prev |----------| (acked)
8405 * rsm |-----| (non-acked)
8406 * nrsm |-| (temporary)
8409 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
8412 /* Now adjust nrsm (the stack copy) to be
8413 * the small
8414 * piece that was "sacked".
8418 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8420 * Now that the rsm has had its start moved forward
8421 * let's go ahead and get its new place in the world.
8423 rack_setup_offset_for_rsm(prev, rsm);
8425 * Now nrsm is our new little piece
8426 * that is acked (which was merged
8427 * to prev). Update the rtt and changed
8428 * based on that. Also check for reordering.
8430 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
8431 if (rack->app_limited_needs_set)
8432 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
8433 changed += (nrsm->r_end - nrsm->r_start);
8434 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
8435 if (nrsm->r_flags & RACK_SACK_PASSED) {
8436 counter_u64_add(rack_reorder_seen, 1);
8437 rack->r_ctl.rc_reorder_ts = cts;
8439 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__);
8441 counter_u64_add(rack_sack_used_prev_merge, 1);
8444 * This is the case where our previous
8445 * block is not acked either, so we must
8446 * split the block in two.
8448 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
8450 /* failed rrs what can we do but lose the sack info? */
8454 * In this case nrsm becomes
8455 * nrsm->r_start = end;
8456 * nrsm->r_end = rsm->r_end;
8457 * which is un-acked.
8459 * rsm->r_end = nrsm->r_start;
8460 * i.e. the remaining un-acked
8461 * piece is left on the left
8464 * So we start like this
8465 * rsm |----------| (not acked)
8467 * build it so we have
8469 * nrsm |------| (not acked)
8471 counter_u64_add(rack_sack_splits, 1);
8472 rack_clone_rsm(rack, nrsm, rsm, end);
8473 rsm->r_flags &= (~RACK_HAS_FIN);
8474 rsm->r_just_ret = 0;
8475 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
8477 if (insret != NULL) {
8478 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
8479 nrsm, insret, rack, rsm);
8482 if (rsm->r_in_tmap) {
8483 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8484 nrsm->r_in_tmap = 1;
8487 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
8488 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
8489 changed += (rsm->r_end - rsm->r_start);
8490 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
8491 if (rsm->r_in_tmap) /* should be true */
8492 rack_log_sack_passed(tp, rack, rsm);
8493 /* Is reordering occurring? */
8494 if (rsm->r_flags & RACK_SACK_PASSED) {
8495 rsm->r_flags &= ~RACK_SACK_PASSED;
8496 counter_u64_add(rack_reorder_seen, 1);
8497 rack->r_ctl.rc_reorder_ts = cts;
8499 if (rack->app_limited_needs_set)
8500 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
8501 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
8502 rsm->r_flags |= RACK_ACKED;
8503 rsm->r_flags &= ~RACK_TLP;
8504 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__);
8505 if (rsm->r_in_tmap) {
8506 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8510 } else if (start != end) {
8512 * The block was already acked.
8514 counter_u64_add(rack_sack_skipped_acked, 1);
8518 if (rsm && (rsm->r_flags & RACK_ACKED)) {
8520 * Now can we merge where we worked
8521 * with either the previous or
8524 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8526 if (next->r_flags & RACK_ACKED) {
8527 /* yep this and next can be merged */
8528 rsm = rack_merge_rsm(rack, rsm, next);
8529 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8533 /* Now what about the previous? */
8534 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8536 if (prev->r_flags & RACK_ACKED) {
8537 /* yep the previous and this can be merged */
8538 rsm = rack_merge_rsm(rack, prev, rsm);
8539 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8544 if (used_ref == 0) {
8545 counter_u64_add(rack_sack_proc_all, 1);
8547 counter_u64_add(rack_sack_proc_short, 1);
8549 /* Save off the next one for quick reference. */
8551 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8554 *prsm = rack->r_ctl.rc_sacklast = nrsm;
8555 /* Pass back the moved count. */
8561 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack)
8563 struct rack_sendmap *tmap;
8566 while (rsm && (rsm->r_flags & RACK_ACKED)) {
8567 /* It's no longer sacked, mark it so */
8568 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
8570 if (rsm->r_in_tmap) {
8571 panic("rack:%p rsm:%p flags:0x%x in tmap?",
8572 rack, rsm, rsm->r_flags);
8575 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS);
8576 /* Rebuild it into our tmap */
8578 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8581 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext);
8584 tmap->r_in_tmap = 1;
8585 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8588 * Now let's possibly clear the sack filter so we start
8589 * recognizing sacks that cover this area.
8591 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack);
8596 rack_do_decay(struct tcp_rack *rack)
8600 #define timersub(tvp, uvp, vvp) \
8602 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
8603 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
8604 if ((vvp)->tv_usec < 0) { \
8606 (vvp)->tv_usec += 1000000; \
8610 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res);
8613 rack->r_ctl.input_pkt++;
8614 if ((rack->rc_in_persist) ||
8615 (res.tv_sec >= 1) ||
8616 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) {
8618 * Check for decay of non-SAD;
8619 * we want all SAD detection metrics to
8620 * decay 1/4 when a second (or more) has passed.
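 * For example (hypothetical numbers): after one second of elapsed
 * receive time an ack_count of 400 would decay to roughly 300; the
 * precise factor is applied by ctf_decay_count() below.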
8624 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt;
8625 /* Update our saved tracking values */
8626 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt;
8627 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
8628 /* Now do we escape without decay? */
8629 #ifdef NETFLIX_EXP_DETECTION
8630 if (rack->rc_in_persist ||
8631 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) ||
8632 (pkt_delta < tcp_sad_low_pps)){
8634 * We don't decay idle connections
8635 * or ones that have a low input pps.
8639 /* Decay the counters */
8640 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count,
8642 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count,
8644 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra,
8646 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move,
8653 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to)
8655 struct rack_sendmap *rsm, *rm;
8658 * The ACK point is advancing to th_ack; we must drop off
8659 * the packets in the rack log and calculate any eligible
8662 rack->r_wanted_output = 1;
8664 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
8666 if ((th_ack - 1) == tp->iss) {
8668 * For the SYN incoming case we will not
8669 * have called tcp_output for the sending of
8670 * the SYN, so there will be no map. All
8671 * other cases should probably be a panic.
8675 if (tp->t_flags & TF_SENTFIN) {
8676 /* if we sent a FIN we will often not have a map */
8680 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n",
8682 tp->t_state, th_ack, rack,
8683 tp->snd_una, tp->snd_max, tp->snd_nxt);
8687 if (SEQ_LT(th_ack, rsm->r_start)) {
8688 /* Huh map is missing this */
8690 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n",
8692 th_ack, tp->t_state, rack->r_state);
8696 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack);
8697 /* Now do we consume the whole thing? */
8698 if (SEQ_GEQ(th_ack, rsm->r_end)) {
8699 /* It's all consumed. */
8701 uint8_t newly_acked;
8703 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__);
8704 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
8705 rsm->r_rtr_bytes = 0;
8706 /* Record the time of highest cumack sent */
8707 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8708 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8711 panic("removing head in rack:%p rsm:%p rm:%p",
8715 if (rsm->r_in_tmap) {
8716 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8720 if (rsm->r_flags & RACK_ACKED) {
8722 * It was acked on the scoreboard -- remove
8725 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
8727 } else if (rsm->r_flags & RACK_SACK_PASSED) {
8729 * There are segments ACKED on the
8730 * scoreboard further up. We are seeing
8733 rsm->r_flags &= ~RACK_SACK_PASSED;
8734 counter_u64_add(rack_reorder_seen, 1);
8735 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
8736 rsm->r_flags |= RACK_ACKED;
8737 rack->r_ctl.rc_reorder_ts = cts;
8738 if (rack->r_ent_rec_ns) {
8740 * We have sent no more, and we saw a sack
8743 rack->r_might_revert = 1;
8746 if ((rsm->r_flags & RACK_TO_REXT) &&
8747 (tp->t_flags & TF_RCVD_TSTMP) &&
8748 (to->to_flags & TOF_TS) &&
8749 (tp->t_flags & TF_PREVVALID)) {
8751 * We can use the timestamp to see
8752 * if this retransmission was from the
8753 * first transmit. If so we made a mistake.
8755 tp->t_flags &= ~TF_PREVVALID;
8756 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) {
8757 /* The first transmit is what this ack is for */
8758 rack_cong_signal(tp, CC_RTO_ERR, th_ack);
8761 left = th_ack - rsm->r_end;
8762 if (rack->app_limited_needs_set && newly_acked)
8763 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK);
8764 /* Free back to zone */
8765 rack_free(rack, rsm);
8769 /* Check for reneging */
8770 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
8771 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) {
8773 * The peer has moved snd_una up to
8774 * the edge of this send, i.e. one
8775 * that it had previously acked. The only
8776 * way that can be true is if the peer threw
8777 * away data (space issues) that it had
8778 * previously sacked (else it would have
8779 * given us snd_una up to rsm->r_end).
8780 * We need to undo the acked markings here.
8782 * Note we have to look to make sure th_ack is
8783 * our rsm->r_start in case we get an old ack
8784 * where th_ack is behind snd_una.
8786 rack_peer_reneges(rack, rsm, th_ack);
8790 if (rsm->r_flags & RACK_ACKED) {
8792 * It was acked on the scoreboard -- remove it from
8793 * the total for the part being cum-acked.
8795 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
8798 * Clear the dup ack count for
8799 * the piece that remains.
8802 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8803 if (rsm->r_rtr_bytes) {
8805 * It was retransmitted; adjust the
8806 * sack holes for what was acked.
8810 ack_am = (th_ack - rsm->r_start);
8811 if (ack_am >= rsm->r_rtr_bytes) {
8812 rack->r_ctl.rc_holes_rxt -= ack_am;
8813 rsm->r_rtr_bytes -= ack_am;
8817 * Update where the piece starts and record
8818 * the send time of the highest cumack sent.
8820 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8821 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__);
8822 /* Now we need to move our offset forward too */
8823 if (rsm->m && (rsm->orig_m_len != rsm->m->m_len)) {
8824 /* Fix up the orig_m_len and possibly the mbuf offset */
8825 rack_adjust_orig_mlen(rsm);
8827 rsm->soff += (th_ack - rsm->r_start);
8828 rsm->r_start = th_ack;
8829 /* Now do we need to move the mbuf fwd too? */
8831 while (rsm->soff >= rsm->m->m_len) {
8832 rsm->soff -= rsm->m->m_len;
8833 rsm->m = rsm->m->m_next;
8834 KASSERT((rsm->m != NULL),
8835 (" nrsm:%p hit at soff:%u null m",
8838 rsm->orig_m_len = rsm->m->m_len;
8840 if (rack->app_limited_needs_set)
8841 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG);
8845 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack)
8847 struct rack_sendmap *rsm;
8848 int sack_pass_fnd = 0;
8850 if (rack->r_might_revert) {
8852 * Ok, we have reordering and have not sent anything; we
8853 * might want to revert the congestion state if nothing
8854 * further has SACK_PASSED on it. Let's check.
8856 * We also get here when we have DSACKs come in for
8857 * all the data that we FR'd. Note that a rxt or tlp
8858 * timer prevents this from happening.
8861 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
8862 if (rsm->r_flags & RACK_SACK_PASSED) {
8867 if (sack_pass_fnd == 0) {
8869 * We went into recovery
8870 * incorrectly due to reordering!
8874 rack->r_ent_rec_ns = 0;
8875 orig_cwnd = tp->snd_cwnd;
8876 tp->snd_cwnd = rack->r_ctl.rc_cwnd_at_erec;
8877 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec;
8878 tp->snd_recover = tp->snd_una;
8879 rack_log_to_prr(rack, 14, orig_cwnd);
8880 EXIT_RECOVERY(tp->t_flags);
8882 rack->r_might_revert = 0;
8886 #ifdef NETFLIX_EXP_DETECTION
8888 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz)
8890 if ((rack->do_detection || tcp_force_detection) &&
8891 tcp_sack_to_ack_thresh &&
8892 tcp_sack_to_move_thresh &&
8893 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) {
8895 * We have thresholds set to find
8896 * possible attackers and disable sack.
8899 uint64_t ackratio, moveratio, movetotal;
8902 rack_log_sad(rack, 1);
8903 ackratio = (uint64_t)(rack->r_ctl.sack_count);
8904 ackratio *= (uint64_t)(1000);
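/*
 * Example (hypothetical numbers): 250 SACK blocks against 100
 * cum-acks gives ackratio = 250 * 1000 / 100 = 2500, i.e. 2.5
 * SACKs per ACK in the fixed-point (x1000) scale used here.
 */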
8905 if (rack->r_ctl.ack_count)
8906 ackratio /= (uint64_t)(rack->r_ctl.ack_count);
8908 /* We really should not hit here */
8911 if ((rack->sack_attack_disable == 0) &&
8912 (ackratio > rack_highest_sack_thresh_seen))
8913 rack_highest_sack_thresh_seen = (uint32_t)ackratio;
8914 movetotal = rack->r_ctl.sack_moved_extra;
8915 movetotal += rack->r_ctl.sack_noextra_move;
8916 moveratio = rack->r_ctl.sack_moved_extra;
8917 moveratio *= (uint64_t)1000;
8919 moveratio /= movetotal;
8921 /* No moves, that's pretty good */
8924 if ((rack->sack_attack_disable == 0) &&
8925 (moveratio > rack_highest_move_thresh_seen))
8926 rack_highest_move_thresh_seen = (uint32_t)moveratio;
8927 if (rack->sack_attack_disable == 0) {
8928 if ((ackratio > tcp_sack_to_ack_thresh) &&
8929 (moveratio > tcp_sack_to_move_thresh)) {
8930 /* Disable sack processing */
8931 rack->sack_attack_disable = 1;
8932 if (rack->r_rep_attack == 0) {
8933 rack->r_rep_attack = 1;
8934 counter_u64_add(rack_sack_attacks_detected, 1);
8936 if (tcp_attack_on_turns_on_logging) {
8938 * Turn on logging, used for debugging
8941 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging;
8943 /* Clamp the cwnd at flight size */
8944 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd;
8945 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
8946 rack_log_sad(rack, 2);
8949 /* We are sack-disabled; check for false positives */
8950 if ((ackratio <= tcp_restoral_thresh) ||
8951 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) {
8952 rack->sack_attack_disable = 0;
8953 rack_log_sad(rack, 3);
8954 /* Restart counting */
8955 rack->r_ctl.sack_count = 0;
8956 rack->r_ctl.sack_moved_extra = 0;
8957 rack->r_ctl.sack_noextra_move = 1;
8958 rack->r_ctl.ack_count = max(1,
8959 (bytes_this_ack / segsiz));
8961 if (rack->r_rep_reverse == 0) {
8962 rack->r_rep_reverse = 1;
8963 counter_u64_add(rack_sack_attacks_reversed, 1);
8965 /* Restore the cwnd */
8966 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd)
8967 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd;
8975 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end)
8980 if (SEQ_GT(end, start))
8985 * We keep track of how many DSACK blocks we get
8986 * after a recovery incident.
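 * For example, if we retransmitted 3000 bytes during recovery and
 * later receive DSACKs covering those same 3000 bytes, the recovery
 * was most likely spurious and we revert the congestion state below.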
8988 rack->r_ctl.dsack_byte_cnt += am;
8989 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) &&
8990 rack->r_ctl.retran_during_recovery &&
8991 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) {
8993 * False recovery; the most likely culprit is reordering. If
8994 * nothing else is missing we need to revert.
8996 rack->r_might_revert = 1;
8997 rack_handle_might_revert(rack->rc_tp, rack);
8998 rack->r_might_revert = 0;
8999 rack->r_ctl.retran_during_recovery = 0;
9000 rack->r_ctl.dsack_byte_cnt = 0;
9005 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack)
9007 /* Deal with changed and PRR here (in recovery only) */
9008 uint32_t pipe, snd_una;
9010 rack->r_ctl.rc_prr_delivered += changed;
9012 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) {
9014 * It is all outstanding; we are application limited
9015 * and thus we don't need more room to send anything.
9016 * Note we use tp->snd_una here and not th_ack because
9017 * the data has not yet been cut from the sb.
9019 rack->r_ctl.rc_prr_sndcnt = 0;
9022 /* Compute prr_sndcnt */
9023 if (SEQ_GT(tp->snd_una, th_ack)) {
9024 snd_una = tp->snd_una;
9028 pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt;
9029 if (pipe > tp->snd_ssthresh) {
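/*
 * Worked example (hypothetical numbers): with snd_ssthresh = 5000,
 * prr_delivered = 3000, a recovery flight size of 10000 and
 * prr_out = 1000, sndcnt = 3000 * 5000 / 10000 - 1000 = 500, so
 * PRR allows 500 more bytes out.
 */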
9032 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh;
9033 if (rack->r_ctl.rc_prr_recovery_fs > 0)
9034 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs;
9036 rack->r_ctl.rc_prr_sndcnt = 0;
9037 rack_log_to_prr(rack, 9, 0);
9041 if (sndcnt > (long)rack->r_ctl.rc_prr_out)
9042 sndcnt -= rack->r_ctl.rc_prr_out;
9045 rack->r_ctl.rc_prr_sndcnt = sndcnt;
9046 rack_log_to_prr(rack, 10, 0);
9050 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out)
9051 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out);
9054 if (changed > limit)
9056 limit += ctf_fixed_maxseg(tp);
9057 if (tp->snd_ssthresh > pipe) {
9058 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit);
9059 rack_log_to_prr(rack, 11, 0);
9061 rack->r_ctl.rc_prr_sndcnt = min(0, limit);
9062 rack_log_to_prr(rack, 12, 0);
9068 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck)
9071 struct tcp_rack *rack;
9072 struct rack_sendmap *rsm;
9073 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1];
9074 register uint32_t th_ack;
9075 int32_t i, j, k, num_sack_blks = 0;
9076 uint32_t cts, acked, ack_point, sack_changed = 0;
9077 int loop_start = 0, moved_two = 0;
9081 INP_WLOCK_ASSERT(tp->t_inpcb);
9082 if (th->th_flags & TH_RST) {
9083 /* We don't log resets */
9086 rack = (struct tcp_rack *)tp->t_fb_ptr;
9087 cts = tcp_get_usecs(NULL);
9088 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
9090 th_ack = th->th_ack;
9091 if (rack->sack_attack_disable == 0)
9092 rack_do_decay(rack);
9093 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) {
9095 * You only get credit for
9096 * MSS and greater (and you get extra
9097 * credit for larger cum-ack moves).
9101 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp);
9102 rack->r_ctl.ack_count += ac;
9103 counter_u64_add(rack_ack_total, ac);
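/*
* E.g. a cum-ack advancing snd_una by 2920 bytes with a 1460 byte
* MSS credits ac = 2 to ack_count; a sub-MSS move credits nothing.
*/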
9105 if (rack->r_ctl.ack_count > 0xfff00000) {
9107 * reduce the number to keep us under the size of a uint32_t.
9110 rack->r_ctl.ack_count /= 2;
9111 rack->r_ctl.sack_count /= 2;
9113 if (SEQ_GT(th_ack, tp->snd_una)) {
9114 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__);
9115 tp->t_acktime = ticks;
9117 if (rsm && SEQ_GT(th_ack, rsm->r_start))
9118 changed = th_ack - rsm->r_start;
9120 rack_process_to_cumack(tp, rack, th_ack, cts, to);
9122 if ((to->to_flags & TOF_SACK) == 0) {
9123 /* We are done, nothing left and no sack. */
9124 rack_handle_might_revert(tp, rack);
9126 * For cases where we struck a dup-ack
9127 * with no SACK, add to the changes so
9128 * PRR will work right.
9130 if (dup_ack_struck && (changed == 0)) {
9131 changed += ctf_fixed_maxseg(rack->rc_tp);
9135 /* Sack block processing */
9136 if (SEQ_GT(th_ack, tp->snd_una))
9139 ack_point = tp->snd_una;
9140 for (i = 0; i < to->to_nsacks; i++) {
9141 bcopy((to->to_sacks + i * TCPOLEN_SACK),
9142 &sack, sizeof(sack));
9143 sack.start = ntohl(sack.start);
9144 sack.end = ntohl(sack.end);
9145 if (SEQ_GT(sack.end, sack.start) &&
9146 SEQ_GT(sack.start, ack_point) &&
9147 SEQ_LT(sack.start, tp->snd_max) &&
9148 SEQ_GT(sack.end, ack_point) &&
9149 SEQ_LEQ(sack.end, tp->snd_max)) {
9150 sack_blocks[num_sack_blks] = sack;
9152 #ifdef NETFLIX_STATS
9153 } else if (SEQ_LEQ(sack.start, th_ack) &&
9154 SEQ_LEQ(sack.end, th_ack)) {
9156 * It's a D-SACK block.
9158 tcp_record_dsack(sack.start, sack.end);
9160 rack_note_dsack(rack, sack.start, sack.end);
9164 * Sort the SACK blocks so we can update the rack scoreboard with just one pass.
9167 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks,
9168 num_sack_blks, th->th_ack);
9169 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks);
9170 if (num_sack_blks == 0) {
9171 /* Nothing to sack (DSACKs?) */
9172 goto out_with_totals;
9174 if (num_sack_blks < 2) {
9175 /* Only one, we don't need to sort */
9178 /* Sort the sacks */
9179 for (i = 0; i < num_sack_blks; i++) {
9180 for (j = i + 1; j < num_sack_blks; j++) {
9181 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
9182 sack = sack_blocks[i];
9183 sack_blocks[i] = sack_blocks[j];
9184 sack_blocks[j] = sack;
9189 * Now are any of the sack block ends the same (yes some
9190 * implementations send these)?
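/*
* For example (hypothetical blocks): given [150, 200] and
* [100, 200], both end at 200. The code below keeps the wider
* [100, 200], drops the narrower block, and shifts any remaining
* blocks down one slot.
*/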
9193 if (num_sack_blks == 0)
9194 goto out_with_totals;
9195 if (num_sack_blks > 1) {
9196 for (i = 0; i < num_sack_blks; i++) {
9197 for (j = i + 1; j < num_sack_blks; j++) {
9198 if (sack_blocks[i].end == sack_blocks[j].end) {
9200 * Ok these two have the same end; we
9201 * want the smallest end and then
9202 * throw away the larger and start over.
9205 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) {
9207 * The second block covers
9208 * more area, use that.
9210 sack_blocks[i].start = sack_blocks[j].start;
9213 * Now collapse out the dup-sack and lower the count.
9216 for (k = (j + 1); k < num_sack_blks; k++) {
9217 sack_blocks[j].start = sack_blocks[k].start;
9218 sack_blocks[j].end = sack_blocks[k].end;
9229 * First let's look to see if
9230 * we have retransmitted and
9231 * can use the transmit next?
9233 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
9235 if (rsm && SEQ_GT(sack_blocks[0].end, rsm->r_start) &&
9236 SEQ_LT(sack_blocks[0].start, rsm->r_end)) {
9238 * We probably did the FR and the next
9239 * SACK in continues as we would expect.
9241 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two);
9243 rack->r_wanted_output = 1;
9245 sack_changed += acked;
9247 if (num_sack_blks == 1) {
9249 * This is what we would expect from
9250 * a normal implementation to happen
9251 * after we have retransmitted the FR,
9252 * i.e. the sack-filter pushes down
9253 * to 1 block and the next to be retransmitted
9254 * is the sequence in the sack block (as more
9255 * are acked). Count this as ACK'd data to boost
9256 * up the chances of recovering any false positives.
9258 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp));
9259 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp)));
9260 counter_u64_add(rack_express_sack, 1);
9261 if (rack->r_ctl.ack_count > 0xfff00000) {
9263 * reduce the number to keep us under the size of a uint32_t.
9266 rack->r_ctl.ack_count /= 2;
9267 rack->r_ctl.sack_count /= 2;
9269 goto out_with_totals;
9272 * Start the loop through the
9273 * rest of blocks, past the first block.
9279 /* It's a sack of some sort */
9280 rack->r_ctl.sack_count++;
9281 if (rack->r_ctl.sack_count > 0xfff00000) {
9283 * reduce the number to keep us under the size of a uint32_t.
9286 rack->r_ctl.ack_count /= 2;
9287 rack->r_ctl.sack_count /= 2;
9289 counter_u64_add(rack_sack_total, 1);
9290 if (rack->sack_attack_disable) {
9291 /* An attacker disablement is in place */
9292 if (num_sack_blks > 1) {
9293 rack->r_ctl.sack_count += (num_sack_blks - 1);
9294 rack->r_ctl.sack_moved_extra++;
9295 counter_u64_add(rack_move_some, 1);
9296 if (rack->r_ctl.sack_moved_extra > 0xfff00000) {
9297 rack->r_ctl.sack_moved_extra /= 2;
9298 rack->r_ctl.sack_noextra_move /= 2;
9303 rsm = rack->r_ctl.rc_sacklast;
9304 for (i = loop_start; i < num_sack_blks; i++) {
9305 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two);
9307 rack->r_wanted_output = 1;
9309 sack_changed += acked;
9313 * If we did not get a SACK for at least a MSS and
9314 * had to move at all, or if we moved more than our
9315 * threshold, it counts against the "extra" move.
9317 rack->r_ctl.sack_moved_extra += moved_two;
9318 counter_u64_add(rack_move_some, 1);
9321 * else we did not have to move
9322 * any more than we would expect.
9324 rack->r_ctl.sack_noextra_move++;
9325 counter_u64_add(rack_move_none, 1);
9327 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) {
9329 * If the SACK was not a full MSS then
9330 * we add to sack_count the number of
9331 * MSS's (or possibly more than
9332 * an MSS if it's a TSO send) we had to skip by.
9334 rack->r_ctl.sack_count += moved_two;
9335 counter_u64_add(rack_sack_total, moved_two);
9338 * Now we need to set up for the next
9339 * round. First we make sure we won't
9340 * exceed the size of our uint32_t on
9341 * the various counts, and then clear out moved_two.
9344 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) ||
9345 (rack->r_ctl.sack_noextra_move > 0xfff00000)) {
9346 rack->r_ctl.sack_moved_extra /= 2;
9347 rack->r_ctl.sack_noextra_move /= 2;
9349 if (rack->r_ctl.sack_count > 0xfff00000) {
9350 rack->r_ctl.ack_count /= 2;
9351 rack->r_ctl.sack_count /= 2;
9356 if (num_sack_blks > 1) {
9358 * You get an extra stroke if
9359 * you have more than one sack-blk, this
9360 * could be where we are skipping forward
9361 * and the sack-filter is still working, or
9362 * it could be an attacker constantly moving us.
9365 rack->r_ctl.sack_moved_extra++;
9366 counter_u64_add(rack_move_some, 1);
9369 #ifdef NETFLIX_EXP_DETECTION
9370 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp));
9373 /* Something changed cancel the rack timer */
9374 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
9376 tsused = tcp_get_usecs(NULL);
9377 rsm = tcp_rack_output(tp, rack, tsused);
9378 if ((!IN_FASTRECOVERY(tp->t_flags)) && rsm) {
9380 /* Enter recovery */
9381 rack->r_ctl.rc_rsm_start = rsm->r_start;
9382 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
9383 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
9384 entered_recovery = 1;
9385 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
9387 * When we enter recovery we need to assure we send one packet.
9390 if (rack->rack_no_prr == 0) {
9391 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
9392 rack_log_to_prr(rack, 8, 0);
9394 rack->r_timer_override = 1;
9396 rack->r_ctl.rc_agg_early = 0;
9397 } else if (IN_FASTRECOVERY(tp->t_flags) &&
9399 rsm && (rack->r_rr_config == 3)) {
9401 * Assure we can output and we get no
9402 * remembered pace time except the retransmit.
9404 rack->r_timer_override = 1;
9405 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
9406 rack->r_ctl.rc_resend = rsm;
9408 if (IN_FASTRECOVERY(tp->t_flags) &&
9409 (rack->rack_no_prr == 0) &&
9410 (entered_recovery == 0)) {
9411 rack_update_prr(tp, rack, changed, th_ack);
9412 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) &&
9413 ((rack->rc_inp->inp_in_hpts == 0) &&
9414 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) {
9416 * If you are pacing output you don't want to override.
9420 rack->r_ctl.rc_agg_early = 0;
9421 rack->r_timer_override = 1;
9427 rack_strike_dupack(struct tcp_rack *rack)
9429 struct rack_sendmap *rsm;
9431 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
9432 while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
9433 rsm = TAILQ_NEXT(rsm, r_tnext);
9435 if (rsm && (rsm->r_dupack < 0xff)) {
rsm->r_dupack++;
9437 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) {
9441 * Here we see if we need to retransmit. For
9442 * a SACK type connection if enough time has passed
9443 * we will get a return of the rsm. For a non-sack
9444 * connection we will get the rsm returned if the
9445 * dupack value is 3 or more.
9447 cts = tcp_get_usecs(&tv);
9448 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts);
9449 if (rack->r_ctl.rc_resend != NULL) {
9450 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) {
9451 rack_cong_signal(rack->rc_tp, CC_NDUPACK,
9452 rack->rc_tp->snd_una);
9454 rack->r_wanted_output = 1;
9455 rack->r_timer_override = 1;
9456 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3);
9459 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3);
9465 rack_check_bottom_drag(struct tcpcb *tp,
9466 struct tcp_rack *rack,
9467 struct socket *so, int32_t acked)
9469 uint32_t segsiz, minseg;
9471 segsiz = ctf_fixed_maxseg(tp);
9474 if (tp->snd_max == tp->snd_una) {
9476 * We are doing dynamic pacing and we are way
9477 * under. Basically everything got acked while
9478 * we were still waiting on the pacer to expire.
9480 * This means we need to boost the b/w in
9481 * addition to any earlier boosting of the multiplier.
9484 rack->rc_dragged_bottom = 1;
9485 rack_validate_multipliers_at_or_above100(rack);
9487 * Let's use the segment bytes acked plus
9488 * the lowest RTT seen as the basis to
9489 * form a b/w estimate. This will be off
9490 * due to the fact that the true estimate
9491 * should be around 1/2 the time of the RTT
9492 * but we can settle for that.
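/*
* Sketch of the arithmetic (hypothetical numbers, assuming the
* estimate works out to bytes * 1000000 / rtt-in-usecs): 29200
* bytes acked over a lowest us RTT of 20000 usecs gives roughly
* 29200 * 1000000 / 20000 = 1,460,000 bytes/sec. As noted, that
* understates the true rate since the data really took about half
* an RTT to be acked.
*/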
9494 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) &&
9496 uint64_t bw, calc_bw, rtt;
9498 rtt = rack->r_ctl.rack_rs.rs_us_rtt;
9500 /* no us sample, is there a ms one? */
9501 if (rack->r_ctl.rack_rs.rs_rtt_lowest) {
9502 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
9504 goto no_measurement;
9508 calc_bw = bw * 1000000;
9510 if (rack->r_ctl.last_max_bw &&
9511 (rack->r_ctl.last_max_bw < calc_bw)) {
9513 * If we have a last calculated max bw, cap this estimate at it.
9516 calc_bw = rack->r_ctl.last_max_bw;
9518 /* now plop it in */
9519 if (rack->rc_gp_filled == 0) {
9520 if (calc_bw > ONE_POINT_TWO_MEG) {
9522 * If we have no measurement
9523 * don't let us set in more than
9524 * 1.2Mbps. If we are still too
9525 * low after pacing with this we
9526 * will hopefully have a max b/w
9527 * available to sanity check things.
9529 calc_bw = ONE_POINT_TWO_MEG;
9531 rack->r_ctl.rc_rtt_diff = 0;
9532 rack->r_ctl.gp_bw = calc_bw;
9533 rack->rc_gp_filled = 1;
9534 if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
9535 rack->r_ctl.num_measurements = RACK_REQ_AVG;
9536 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
9537 } else if (calc_bw > rack->r_ctl.gp_bw) {
9538 rack->r_ctl.rc_rtt_diff = 0;
9539 if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
9540 rack->r_ctl.num_measurements = RACK_REQ_AVG;
9541 rack->r_ctl.gp_bw = calc_bw;
9542 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
9544 rack_increase_bw_mul(rack, -1, 0, 0, 1);
9545 if ((rack->gp_ready == 0) &&
9546 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
9547 /* We have enough measurements now */
9549 rack_set_cc_pacing(rack);
9550 if (rack->defer_options)
9551 rack_apply_deferred_options(rack);
9554 * For acks over 1mss we do an extra boost to simulate
9555 * where we would get 2 acks (we want 110 for the mul).
9558 rack_increase_bw_mul(rack, -1, 0, 0, 1);
9561 * Zero rtt possibly? Settle for just an old increase.
9564 rack_increase_bw_mul(rack, -1, 0, 0, 1);
9566 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
9567 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)),
9569 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) &&
9570 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) &&
9571 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <=
9572 (segsiz * rack_req_segs))) {
9574 * We are doing dynamic GP pacing and
9575 * we have everything out except 1MSS or less
9576 * bytes. We are still pacing away.
9577 * And there is data that could be sent. This
9578 * means we are inserting delayed ack time in
9579 * our measurements because we are pacing too slow.
9581 rack_validate_multipliers_at_or_above100(rack);
9582 rack->rc_dragged_bottom = 1;
9583 rack_increase_bw_mul(rack, -1, 0, 0, 1);
9590 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount)
9593 * The fast output path is enabled and we
9594 * have moved the cumack forward. Lets see if
9595 * we can expand forward the fast path length by
9596 * that amount. What we would ideally like to
9597 * do is increase the number of bytes in the
9598 * fast path block (left_to_send) by the
9599 * acked amount. However we have to gate that by two factors:
9601 * 1) The amount outstanding and the rwnd of the peer
9602 * (i.e. we don't want to exceed the rwnd of the peer).
9604 * 2) The amount of data left in the socket buffer (i.e.
9605 * we can't send beyond what is in the buffer).
9607 * Note that this does not take into account any increase
9608 * in the cwnd. We will only extend the fast path by what was acked.
9611 uint32_t new_total, gating_val;
9613 new_total = acked_amount + rack->r_ctl.fsb.left_to_send;
9614 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)),
9615 (tp->snd_wnd - (tp->snd_max - tp->snd_una)));
9616 if (new_total <= gating_val) {
9617 /* We can increase left_to_send by the acked amount */
9618 counter_u64_add(rack_extended_rfo, 1);
9619 rack->r_ctl.fsb.left_to_send = new_total;
9620 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))),
9621 ("rack:%p left_to_send:%u sbavail:%u out:%u",
9622 rack, rack->r_ctl.fsb.left_to_send,
9623 sbavail(&rack->rc_inp->inp_socket->so_snd),
9624 (tp->snd_max - tp->snd_una)));
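/*
* Hypothetical example of the gating above: with 50000 bytes in
* the send buffer, 10000 bytes outstanding, and a 30000 byte peer
* rwnd, gating_val = min(50000 - 10000, 30000 - 10000) = 20000.
* A left_to_send of 15000 grown by a 4000 byte cum-ack gives
* 19000 <= 20000, so the fast path block may be extended.
*/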
9630 rack_adjust_sendmap(struct tcp_rack *rack, struct sockbuf *sb, tcp_seq snd_una)
9633 * Here any sendmap entry that points to the
9634 * beginning mbuf must be adjusted to the correct
9635 * offset. This must be called with:
9636 * 1) The socket buffer locked
9637 * 2) snd_una adjusted to its new position.
9639 * Note that (2) implies rack_ack_received has also been called.
9642 * We grab the first mbuf in the socket buffer and
9643 * then go through the front of the sendmap, recalculating
9644 * the stored offset for any sendmap entry that has
9645 * that mbuf. We must use the sb functions to do this
9646 * since it's possible an add was done as well as
9647 * the subtraction we may have just completed. This should
9648 * not be a penalty though, since we just referenced the sb
9649 * to go in and trim off the mbufs that we freed (of course
9650 * there will be a penalty for the sendmap references though).
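/*
* Concretely (hypothetical values): if the cum-ack trimmed 1000
* bytes of whole mbufs off the front of the send buffer, an rsm
* whose r_start is now 500 bytes past snd_una may still point at
* a freed mbuf; sbsndmbuf() re-derives the (mbuf, soff) pair from
* the surviving chain so the sendmap references stay valid.
*/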
9653 struct rack_sendmap *rsm;
9655 SOCKBUF_LOCK_ASSERT(sb);
9657 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
9658 if ((rsm == NULL) || (m == NULL)) {
9659 /* Nothing outstanding */
9662 while (rsm->m && (rsm->m == m)) {
9668 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff);
9669 if (rsm->orig_m_len != m->m_len) {
9670 rack_adjust_orig_mlen(rsm);
9672 if (rsm->soff != soff) {
9674 * This is not a fatal error, we anticipate it
9675 * might happen (the else code), so we count it here
9676 * so that under invariant we can see that it really does happen.
9679 counter_u64_add(rack_adjust_map_bw, 1);
9684 rsm->orig_m_len = rsm->m->m_len;
9686 rsm->orig_m_len = 0;
9688 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff);
9690 rsm->orig_m_len = rsm->m->m_len;
9692 rsm->orig_m_len = 0;
9694 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
9702 * Return value of 1, we do not need to call rack_process_data().
9703 * Return value of 0, rack_process_data can be called.
9704 * For ret_val, if it's 0 the TCP is locked; if it's non-zero
9705 * it's unlocked and probably unsafe to touch the TCB.
9708 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
9709 struct tcpcb *tp, struct tcpopt *to,
9710 uint32_t tiwin, int32_t tlen,
9711 int32_t * ofia, int32_t thflags, int32_t *ret_val)
9713 int32_t ourfinisacked = 0;
9714 int32_t nsegs, acked_amount;
9717 struct tcp_rack *rack;
9718 int32_t under_pacing = 0;
9719 int32_t recovery = 0;
9721 rack = (struct tcp_rack *)tp->t_fb_ptr;
9722 if (SEQ_GT(th->th_ack, tp->snd_max)) {
9723 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val,
9724 &rack->r_ctl.challenge_ack_ts,
9725 &rack->r_ctl.challenge_ack_cnt);
9726 rack->r_wanted_output = 1;
9729 if (rack->gp_ready &&
9730 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
9733 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
9734 int in_rec, dup_ack_struck = 0;
9736 in_rec = IN_FASTRECOVERY(tp->t_flags);
9737 if (rack->rc_in_persist) {
9739 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
9740 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
9742 if ((th->th_ack == tp->snd_una) && (tiwin == tp->snd_wnd)) {
9743 rack_strike_dupack(rack);
9746 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), dup_ack_struck);
9748 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
9750 * Old ack, behind (or duplicate to) the last one rcv'd
9751 * Note: We mark reordering as occurring if the ack is
9752 * less than snd_una and we have not closed our window.
9754 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) {
9755 counter_u64_add(rack_reorder_seen, 1);
9756 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
9761 * If we reach this point, ACK is not a duplicate, i.e., it ACKs
9762 * something we sent.
9764 if (tp->t_flags & TF_NEEDSYN) {
9766 * T/TCP: Connection was half-synchronized, and our SYN has
9767 * been ACK'd (so connection is now fully synchronized). Go
9768 * to non-starred state, increment snd_una for ACK of SYN,
9769 * and check if we can do window scaling.
9771 tp->t_flags &= ~TF_NEEDSYN;
9773 /* Do window scaling? */
9774 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
9775 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
9776 tp->rcv_scale = tp->request_r_scale;
9777 /* Send window already scaled. */
9780 nsegs = max(1, m->m_pkthdr.lro_nsegs);
9781 INP_WLOCK_ASSERT(tp->t_inpcb);
9783 acked = BYTES_THIS_ACK(tp, th);
9784 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
9785 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
9787 * If we just performed our first retransmit, and the ACK arrives
9788 * within our recovery window, then it was a mistake to do the
9789 * retransmit in the first place. Recover our original cwnd and
9790 * ssthresh, and proceed to transmit where we left off.
9792 if ((tp->t_flags & TF_PREVVALID) &&
9793 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
9794 tp->t_flags &= ~TF_PREVVALID;
9795 if (tp->t_rxtshift == 1 &&
9796 (int)(ticks - tp->t_badrxtwin) < 0)
9797 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack);
9800 /* assure we are not backed off */
9802 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
9803 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
9804 rack->rc_tlp_in_progress = 0;
9805 rack->r_ctl.rc_tlp_cnt_out = 0;
9807 * If it is the RXT timer we want to
9808 * stop it, so we can restart a TLP.
9810 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
9811 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
9812 #ifdef NETFLIX_HTTP_LOGGING
9813 tcp_http_check_for_comp(rack->rc_tp, th->th_ack);
9817 * If we have a timestamp reply, update smoothed round trip time. If
9818 * no timestamp is present but transmit timer is running and timed
9819 * sequence number was acked, update smoothed round trip time. Since
9820 * we now have an rtt measurement, cancel the timer backoff (cf.,
9821 * Phil Karn's retransmit alg.). Recompute the initial retransmit
9824 * Some boxes send broken timestamp replies during the SYN+ACK
9825 * phase, ignore timestamps of 0 or we could calculate a huge RTT
9826 * and blow up the retransmit timer.
9829 * If all outstanding data is acked, stop retransmit timer and
9830 * remember to restart (more output or persist). If there is more
9831 * data to be acked, restart retransmit timer, using current
9832 * (possibly backed-off) value.
9836 *ofia = ourfinisacked;
9839 if (IN_RECOVERY(tp->t_flags)) {
9840 if (SEQ_LT(th->th_ack, tp->snd_recover) &&
9841 (SEQ_LT(th->th_ack, tp->snd_max))) {
9842 tcp_rack_partialack(tp);
9844 rack_post_recovery(tp, th->th_ack);
9849 * Let the congestion control algorithm update congestion control
9850 * related information. This typically means increasing the
9851 * congestion window.
9853 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery);
9854 SOCKBUF_LOCK(&so->so_snd);
9855 acked_amount = min(acked, (int)sbavail(&so->so_snd));
9856 tp->snd_wnd -= acked_amount;
9857 mfree = sbcut_locked(&so->so_snd, acked_amount);
9858 if ((sbused(&so->so_snd) == 0) &&
9859 (acked > acked_amount) &&
9860 (tp->t_state >= TCPS_FIN_WAIT_1) &&
9861 (tp->t_flags & TF_SENTFIN)) {
9863 * We must be sure our fin
9864 * was sent and acked (we can be
9865 * in FIN_WAIT_1 without having sent a FIN).
9870 tp->snd_una = th->th_ack;
9871 if (acked_amount && sbavail(&so->so_snd))
9872 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una);
9873 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
9874 /* NB: sowwakeup_locked() does an implicit unlock. */
9875 sowwakeup_locked(so);
9877 if (SEQ_GT(tp->snd_una, tp->snd_recover))
9878 tp->snd_recover = tp->snd_una;
9880 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
9881 tp->snd_nxt = tp->snd_una;
9884 if (under_pacing && (rack->use_fixed_rate == 0) &&
9885 (rack->in_probe_rtt == 0) &&
9886 rack->rc_gp_dyn_mul &&
9887 rack->rc_always_pace) {
9888 /* Check if we are dragging bottom */
9889 rack_check_bottom_drag(tp, rack, so, acked);
9891 if (tp->snd_una == tp->snd_max) {
9892 /* Nothing left outstanding */
9893 tp->t_flags &= ~TF_PREVVALID;
9894 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
9895 rack->r_ctl.retran_during_recovery = 0;
9896 rack->r_ctl.dsack_byte_cnt = 0;
9897 if (rack->r_ctl.rc_went_idle_time == 0)
9898 rack->r_ctl.rc_went_idle_time = 1;
9899 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
9900 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
9902 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
9903 /* Set need output so persist might get set */
9904 rack->r_wanted_output = 1;
9905 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
9906 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
9907 (sbavail(&so->so_snd) == 0) &&
9908 (tp->t_flags2 & TF2_DROP_AF_DATA)) {
9910 * The socket was gone and the
9911 * peer sent data (now or in the past), time to reset him.
9915 /* tcp_close will kill the inp pre-log the Reset */
9916 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
9918 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
9923 *ofia = ourfinisacked;
9928 rack_collapsed_window(struct tcp_rack *rack)
9931 * Now we must walk the
9932 * send map and divide the
9933 * ones left stranded. These
9934 * guys can't cause us to abort
9935 * the connection and are really
9936 * "unsent". However if a buggy
9937 * client actually did keep some
9938 * of the data i.e. collapsed the win
9939 * and refused to ack and then opened
9940 * the win and acked that data. We would
9941 * get into an ack war, the simplier
9942 * method then of just pretending we
9943 * did not send those segments something
9946 struct rack_sendmap *rsm, *nrsm, fe, *insret;
9949 max_seq = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd;
9950 memset(&fe, 0, sizeof(fe));
9951 fe.r_start = max_seq;
9952 /* Find the first seq past or at maxseq */
9953 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
9955 /* Nothing to do, strange */
9956 rack->rc_has_collapsed = 0;
9960 * Now do we need to split at
9961 * the collapse point?
9963 if (SEQ_GT(max_seq, rsm->r_start)) {
9964 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
9966 /* We can't get a rsm, mark all? */
9971 rack_clone_rsm(rack, nrsm, rsm, max_seq);
9972 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
9974 if (insret != NULL) {
9975 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
9976 nrsm, insret, rack, rsm);
9979 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, max_seq, __LINE__);
9980 if (rsm->r_in_tmap) {
9981 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
9982 nrsm->r_in_tmap = 1;
9985 * Set in the new RSM as the
9986 * collapsed starting point
9991 counter_u64_add(rack_collapsed_win, 1);
9992 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) {
9993 nrsm->r_flags |= RACK_RWND_COLLAPSED;
9995 rack->rc_has_collapsed = 1;
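/*
* Example (hypothetical sequence space): snd_una = 1000 and
* snd_wnd = 2000 give max_seq = 3000. An rsm covering
* [2500, 4000) straddles that point, so it is split at 3000 and
* the tail [3000, 4000), plus every rsm after it, is flagged
* RACK_RWND_COLLAPSED rather than being treated as lost.
*/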
9999 rack_un_collapse_window(struct tcp_rack *rack)
10001 struct rack_sendmap *rsm;
10003 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
10004 if (rsm->r_flags & RACK_RWND_COLLAPSED)
10005 rsm->r_flags &= ~RACK_RWND_COLLAPSED;
10009 rack->rc_has_collapsed = 0;
10013 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack,
10014 int32_t tlen, int32_t tfo_syn)
10016 if (DELAY_ACK(tp, tlen) || tfo_syn) {
10017 if (rack->rc_dack_mode &&
10019 (rack->rc_dack_toggle == 1)) {
10020 goto no_delayed_ack;
10022 rack_timer_cancel(tp, rack,
10023 rack->r_ctl.rc_rcvtime, __LINE__);
10024 tp->t_flags |= TF_DELACK;
10027 rack->r_wanted_output = 1;
10028 tp->t_flags |= TF_ACKNOW;
10029 if (rack->rc_dack_mode) {
10030 if (tp->t_flags & TF_DELACK)
10031 rack->rc_dack_toggle = 1;
10033 rack->rc_dack_toggle = 0;
10039 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack)
10042 * If fast output is in progress, let's validate that
10043 * the new window did not shrink on us; if it did,
10044 * fast output should end.
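/*
* E.g. (hypothetical): with out = 20000 bytes outstanding,
* left_to_send = 15000, and the window shrunk to snd_wnd = 25000,
* 20000 + 15000 exceeds 25000 but out is still under snd_wnd, so
* left_to_send is clipped to 5000; were that below one MSS, fast
* output would be turned off entirely.
*/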
10046 if (rack->r_fast_output) {
10050 * Calculate what we will send if left as is
10051 * and compare that to our send window.
10053 out = ctf_outstanding(tp);
10054 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) {
10055 /* ok we have an issue */
10056 if (out >= tp->snd_wnd) {
10057 /* Turn off fast output the window is met or collapsed */
10058 rack->r_fast_output = 0;
10060 /* we have some room left */
10061 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out;
10062 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) {
10063 /* If not at least 1 full segment never mind */
10064 rack->r_fast_output = 0;
10073 * Return value of 1, the TCB is unlocked and most
10074 * likely gone, return value of 0, the TCP is still locked.
10078 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
10079 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
10080 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
10083 * Update window information. Don't look at window if no ACK: TAC's
10084 * send garbage on first SYN.
10088 struct tcp_rack *rack;
10090 rack = (struct tcp_rack *)tp->t_fb_ptr;
10091 INP_WLOCK_ASSERT(tp->t_inpcb);
10092 nsegs = max(1, m->m_pkthdr.lro_nsegs);
10093 if ((thflags & TH_ACK) &&
10094 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
10095 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
10096 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
10097 /* keep track of pure window updates */
10099 if (tlen == 0 && tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
10100 KMOD_TCPSTAT_INC(tcps_rcvwinupd);
10101 tp->snd_wnd = tiwin;
10102 rack_validate_fo_sendwin_up(tp, rack);
10103 tp->snd_wl1 = th->th_seq;
10104 tp->snd_wl2 = th->th_ack;
10105 if (tp->snd_wnd > tp->max_sndwnd)
10106 tp->max_sndwnd = tp->snd_wnd;
10107 rack->r_wanted_output = 1;
10108 } else if (thflags & TH_ACK) {
10109 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) {
10110 tp->snd_wnd = tiwin;
10111 rack_validate_fo_sendwin_up(tp, rack);
10112 tp->snd_wl1 = th->th_seq;
10113 tp->snd_wl2 = th->th_ack;
10116 if (tp->snd_wnd < ctf_outstanding(tp))
10117 /* The peer collapsed the window */
10118 rack_collapsed_window(rack);
10119 else if (rack->rc_has_collapsed)
10120 rack_un_collapse_window(rack);
10121 /* Was persist timer active and now we have window space? */
10122 if ((rack->rc_in_persist != 0) &&
10123 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
10124 rack->r_ctl.rc_pace_min_segs))) {
10125 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime);
10126 tp->snd_nxt = tp->snd_max;
10127 /* Make sure we output to start the timer */
10128 rack->r_wanted_output = 1;
10130 /* Do we enter persists? */
10131 if ((rack->rc_in_persist == 0) &&
10132 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
10133 TCPS_HAVEESTABLISHED(tp->t_state) &&
10134 (tp->snd_max == tp->snd_una) &&
10135 sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
10136 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
10138 * Here the rwnd is less than
10139 * the pacing size, we are established,
10140 * nothing is outstanding, and there is
10141 * data to send. Enter persists.
10143 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
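/*
* Numerically (hypothetical): with rc_high_rwnd = 64000 and
* rc_pace_min_segs = 4380 the persist threshold is
* min(32000, 4380) = 4380 bytes; a peer rwnd of 2000 with nothing
* outstanding and data queued therefore enters persists.
*/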
10145 if (tp->t_flags2 & TF2_DROP_AF_DATA) {
10150 * don't process the URG bit; ignore it and just drag rcv_up along.
10153 tp->rcv_up = tp->rcv_nxt;
10154 INP_WLOCK_ASSERT(tp->t_inpcb);
10157 * Process the segment text, merging it into the TCP sequencing
10158 * queue, and arranging for acknowledgment of receipt if necessary.
10159 * This process logically involves adjusting tp->rcv_wnd as data is
10160 * presented to the user (this happens in tcp_usrreq.c, case
10161 * PRU_RCVD). If a FIN has already been received on this connection
10162 * then we just ignore the text.
10164 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
10165 IS_FASTOPEN(tp->t_flags));
10166 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) &&
10167 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
10168 tcp_seq save_start = th->th_seq;
10169 tcp_seq save_rnxt = tp->rcv_nxt;
10170 int save_tlen = tlen;
10172 m_adj(m, drop_hdrlen); /* delayed header drop */
10174 * Insert segment which includes th into TCP reassembly
10175 * queue with control block tp. Set thflags to whether
10176 * reassembly now includes a segment with FIN. This handles
10177 * the common case inline (segment is the next to be
10178 * received on an established connection, and the queue is
10179 * empty), avoiding linkage into and removal from the queue
10180 * and repetition of various conversions. Set DELACK for
10181 * segments received in order, but ack immediately when
10182 * segments are out of order (so fast retransmit can work).
10184 if (th->th_seq == tp->rcv_nxt &&
10186 (TCPS_HAVEESTABLISHED(tp->t_state) ||
10188 #ifdef NETFLIX_SB_LIMITS
10189 u_int mcnt, appended;
10191 if (so->so_rcv.sb_shlim) {
10192 mcnt = m_memcnt(m);
10194 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
10195 CFO_NOSLEEP, NULL) == false) {
10196 counter_u64_add(tcp_sb_shlim_fails, 1);
10202 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn);
10203 tp->rcv_nxt += tlen;
10205 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
10206 (tp->t_fbyte_in == 0)) {
10207 tp->t_fbyte_in = ticks;
10208 if (tp->t_fbyte_in == 0)
10209 tp->t_fbyte_in = 1;
10210 if (tp->t_fbyte_out && tp->t_fbyte_in)
10211 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
10213 thflags = th->th_flags & TH_FIN;
10214 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
10215 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
10216 SOCKBUF_LOCK(&so->so_rcv);
10217 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
10220 #ifdef NETFLIX_SB_LIMITS
10223 sbappendstream_locked(&so->so_rcv, m, 0);
10225 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1);
10226 /* NB: sorwakeup_locked() does an implicit unlock. */
10227 sorwakeup_locked(so);
10228 #ifdef NETFLIX_SB_LIMITS
10229 if (so->so_rcv.sb_shlim && appended != mcnt)
10230 counter_fo_release(so->so_rcv.sb_shlim,
10235 * XXX: Due to the header drop above "th" is
10236 * theoretically invalid by now. Fortunately
10237 * m_adj() doesn't actually free any mbufs when
10238 * trimming from the head.
10240 tcp_seq temp = save_start;
10242 thflags = tcp_reass(tp, th, &temp, &tlen, m);
10243 tp->t_flags |= TF_ACKNOW;
10244 if (tp->t_flags & TF_WAKESOR) {
10245 tp->t_flags &= ~TF_WAKESOR;
10246 /* NB: sorwakeup_locked() does an implicit unlock. */
10247 sorwakeup_locked(so);
10250 if ((tp->t_flags & TF_SACK_PERMIT) &&
10252 TCPS_HAVEESTABLISHED(tp->t_state)) {
10253 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
10255 * DSACK actually handled in the fastpath above.
10258 RACK_OPTS_INC(tcp_sack_path_1);
10259 tcp_update_sack_list(tp, save_start,
10260 save_start + save_tlen);
10261 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
10262 if ((tp->rcv_numsacks >= 1) &&
10263 (tp->sackblks[0].end == save_start)) {
10265 * Partial overlap, recorded at todrop
10268 RACK_OPTS_INC(tcp_sack_path_2a);
10269 tcp_update_sack_list(tp,
10270 tp->sackblks[0].start,
10271 tp->sackblks[0].end);
10273 RACK_OPTS_INC(tcp_sack_path_2b);
10274 tcp_update_dsack_list(tp, save_start,
10275 save_start + save_tlen);
10277 } else if (tlen >= save_tlen) {
10278 /* Update of sackblks. */
10279 RACK_OPTS_INC(tcp_sack_path_3);
10280 tcp_update_dsack_list(tp, save_start,
10281 save_start + save_tlen);
10282 } else if (tlen > 0) {
10283 RACK_OPTS_INC(tcp_sack_path_4);
10284 tcp_update_dsack_list(tp, save_start,
10285 save_start + tlen);
10290 thflags &= ~TH_FIN;
10294 * If FIN is received ACK the FIN and let the user know that the
10295 * connection is closing.
10297 if (thflags & TH_FIN) {
10298 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
10299 /* The socket upcall is handled by socantrcvmore. */
10302 * If connection is half-synchronized (ie NEEDSYN
10303 * flag on) then delay ACK, so it may be piggybacked
10304 * when SYN is sent. Otherwise, since we received a
10305 * FIN then no more input can be expected, send ACK
10308 if (tp->t_flags & TF_NEEDSYN) {
10309 rack_timer_cancel(tp, rack,
10310 rack->r_ctl.rc_rcvtime, __LINE__);
10311 tp->t_flags |= TF_DELACK;
10313 tp->t_flags |= TF_ACKNOW;
10317 switch (tp->t_state) {
10319 * In SYN_RECEIVED and ESTABLISHED STATES enter the
10320 * CLOSE_WAIT state.
10322 case TCPS_SYN_RECEIVED:
10323 tp->t_starttime = ticks;
10325 case TCPS_ESTABLISHED:
10326 rack_timer_cancel(tp, rack,
10327 rack->r_ctl.rc_rcvtime, __LINE__);
10328 tcp_state_change(tp, TCPS_CLOSE_WAIT);
10332 * If still in FIN_WAIT_1 STATE FIN has not been
10333 * acked so enter the CLOSING state.
10335 case TCPS_FIN_WAIT_1:
10336 rack_timer_cancel(tp, rack,
10337 rack->r_ctl.rc_rcvtime, __LINE__);
10338 tcp_state_change(tp, TCPS_CLOSING);
10342 * In FIN_WAIT_2 state enter the TIME_WAIT state,
10343 * starting the time-wait timer, turning off the
10344 * other standard timers.
10346 case TCPS_FIN_WAIT_2:
10347 rack_timer_cancel(tp, rack,
10348 rack->r_ctl.rc_rcvtime, __LINE__);
10354 * Return any desired output.
10356 if ((tp->t_flags & TF_ACKNOW) ||
10357 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) {
10358 rack->r_wanted_output = 1;
10360 INP_WLOCK_ASSERT(tp->t_inpcb);
10365 * Here nothing is really faster, it's just that we
10366 * have broken out the fast-data path also just like the fast-ack path.
10370 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so,
10371 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10372 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos)
10375 int32_t newsize = 0; /* automatic sockbuf scaling */
10376 struct tcp_rack *rack;
10377 #ifdef NETFLIX_SB_LIMITS
10378 u_int mcnt, appended;
10382 * The size of tcp_saveipgen must be the size of the max ip header, now IPv6.
10385 u_char tcp_saveipgen[IP6_HDR_LEN];
10386 struct tcphdr tcp_savetcp;
10391 * If last ACK falls within this segment's sequence numbers, record
10392 * the timestamp. NOTE that the test is modified according to the
10393 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
10395 if (__predict_false(th->th_seq != tp->rcv_nxt)) {
10398 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
10401 if (tiwin && tiwin != tp->snd_wnd) {
10404 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) {
10407 if (__predict_false((to->to_flags & TOF_TS) &&
10408 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) {
10411 if (__predict_false((th->th_ack != tp->snd_una))) {
10414 if (__predict_false(tlen > sbspace(&so->so_rcv))) {
10417 if ((to->to_flags & TOF_TS) != 0 &&
10418 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
10419 tp->ts_recent_age = tcp_ts_getticks();
10420 tp->ts_recent = to->to_tsval;
10422 rack = (struct tcp_rack *)tp->t_fb_ptr;
10424 * This is a pure, in-sequence data packet with nothing on the
10425 * reassembly queue and we have enough buffer space to take it.
10427 nsegs = max(1, m->m_pkthdr.lro_nsegs);
10429 #ifdef NETFLIX_SB_LIMITS
10430 if (so->so_rcv.sb_shlim) {
10431 mcnt = m_memcnt(m);
10433 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
10434 CFO_NOSLEEP, NULL) == false) {
10435 counter_u64_add(tcp_sb_shlim_fails, 1);
10441 /* Clean receiver SACK report if present */
10442 if (tp->rcv_numsacks)
10443 tcp_clean_sackreport(tp);
10444 KMOD_TCPSTAT_INC(tcps_preddat);
10445 tp->rcv_nxt += tlen;
10447 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
10448 (tp->t_fbyte_in == 0)) {
10449 tp->t_fbyte_in = ticks;
10450 if (tp->t_fbyte_in == 0)
10451 tp->t_fbyte_in = 1;
10452 if (tp->t_fbyte_out && tp->t_fbyte_in)
10453 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
10456 * Pull snd_wl1 up to prevent seq wrap relative to th_seq.
10458 tp->snd_wl1 = th->th_seq;
10460 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt.
10462 tp->rcv_up = tp->rcv_nxt;
10463 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
10464 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
10466 if (so->so_options & SO_DEBUG)
10467 tcp_trace(TA_INPUT, ostate, tp,
10468 (void *)tcp_saveipgen, &tcp_savetcp, 0);
10470 newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
10472 /* Add data to socket buffer. */
10473 SOCKBUF_LOCK(&so->so_rcv);
10474 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
10478 * Set new socket buffer size. Give up when limit is reached.
10482 if (!sbreserve_locked(&so->so_rcv,
10483 newsize, so, NULL))
10484 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
10485 m_adj(m, drop_hdrlen); /* delayed header drop */
10486 #ifdef NETFLIX_SB_LIMITS
10489 sbappendstream_locked(&so->so_rcv, m, 0);
10490 ctf_calc_rwin(so, tp);
10492 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1);
10493 /* NB: sorwakeup_locked() does an implicit unlock. */
10494 sorwakeup_locked(so);
10495 #ifdef NETFLIX_SB_LIMITS
10496 if (so->so_rcv.sb_shlim && mcnt != appended)
10497 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended);
10499 rack_handle_delayed_ack(tp, rack, tlen, 0);
10500 if (tp->snd_una == tp->snd_max)
10501 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
10506 * This subfunction is used to try to highly optimize the
10507 * fast path. We again allow window updates that are
10508 * in sequence to remain in the fast-path. We also add
10509 * in the __predict's to attempt to help the compiler.
10510 * Note that if we return a 0, then we can *not* process
10511 * it and the caller should push the packet into the slow-path.
10515 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
10516 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10517 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts)
10523 * The size of tcp_saveipgen must be the size of the max ip header, now IPv6.
10526 u_char tcp_saveipgen[IP6_HDR_LEN];
10527 struct tcphdr tcp_savetcp;
10530 int32_t under_pacing = 0;
10531 struct tcp_rack *rack;
10533 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
10534 /* Old ack, behind (or duplicate to) the last one rcv'd */
10537 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) {
10538 /* Above what we have sent? */
10541 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
10542 /* We are retransmitting */
10545 if (__predict_false(tiwin == 0)) {
10549 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) {
10550 /* We need a SYN or a FIN, unlikely.. */
10553 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) {
10554 /* Timestamp is behind .. old ack with seq wrap? */
10557 if (__predict_false(IN_RECOVERY(tp->t_flags))) {
10558 /* Still recovering */
10561 rack = (struct tcp_rack *)tp->t_fb_ptr;
10562 if (rack->r_ctl.rc_sacked) {
10563 /* We have sack holes on our scoreboard */
10566 /* Ok if we reach here, we can process a fast-ack */
10567 if (rack->gp_ready &&
10568 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
10571 nsegs = max(1, m->m_pkthdr.lro_nsegs);
10572 rack_log_ack(tp, to, th, 0, 0);
10573 /* Did the window get updated? */
10574 if (tiwin != tp->snd_wnd) {
10575 tp->snd_wnd = tiwin;
10576 rack_validate_fo_sendwin_up(tp, rack);
10577 tp->snd_wl1 = th->th_seq;
10578 if (tp->snd_wnd > tp->max_sndwnd)
10579 tp->max_sndwnd = tp->snd_wnd;
10581 /* Do we exit persists? */
10582 if ((rack->rc_in_persist != 0) &&
10583 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
10584 rack->r_ctl.rc_pace_min_segs))) {
10585 rack_exit_persist(tp, rack, cts);
10587 /* Do we enter persists? */
10588 if ((rack->rc_in_persist == 0) &&
10589 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
10590 TCPS_HAVEESTABLISHED(tp->t_state) &&
10591 (tp->snd_max == tp->snd_una) &&
10592 sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
10593 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
10595 * Here the rwnd is less than
10596 * the pacing size, we are established,
10597 * nothing is outstanding, and there is
10598 * data to send. Enter persists.
10600 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
10603 * If last ACK falls within this segment's sequence numbers, record
10604 * the timestamp. NOTE that the test is modified according to the
10605 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
10607 if ((to->to_flags & TOF_TS) != 0 &&
10608 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
10609 tp->ts_recent_age = tcp_ts_getticks();
10610 tp->ts_recent = to->to_tsval;
10613 * This is a pure ack for outstanding data.
10615 KMOD_TCPSTAT_INC(tcps_predack);
10618 * "bad retransmit" recovery.
10620 if ((tp->t_flags & TF_PREVVALID) &&
10621 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
10622 tp->t_flags &= ~TF_PREVVALID;
10623 if (tp->t_rxtshift == 1 &&
10624 (int)(ticks - tp->t_badrxtwin) < 0)
10625 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack);
10628 * Recalculate the transmit timer / rtt.
10630 * Some boxes send broken timestamp replies during the SYN+ACK
10631 * phase, ignore timestamps of 0 or we could calculate a huge RTT
10632 * and blow up the retransmit timer.
10634 acked = BYTES_THIS_ACK(tp, th);
10637 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
10638 hhook_run_tcp_est_in(tp, th, to);
10640 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
10641 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
10643 struct mbuf *mfree;
10645 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0);
10646 SOCKBUF_LOCK(&so->so_snd);
10647 mfree = sbcut_locked(&so->so_snd, acked);
10648 tp->snd_una = th->th_ack;
10649 /* Note we want to hold the sb lock through the sendmap adjust */
10650 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una);
10651 /* Wake up the socket if we have room to write more */
10652 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
10653 sowwakeup_locked(so);
10655 tp->t_rxtshift = 0;
10656 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
10657 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
10658 rack->rc_tlp_in_progress = 0;
10659 rack->r_ctl.rc_tlp_cnt_out = 0;
10661 * If it is the RXT timer we want to
10662 * stop it, so we can restart a TLP.
10664 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
10665 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
10666 #ifdef NETFLIX_HTTP_LOGGING
10667 tcp_http_check_for_comp(rack->rc_tp, th->th_ack);
10671 * Let the congestion control algorithm update congestion control
10672 * related information. This typically means increasing the
10673 * congestion window.
10675 if (tp->snd_wnd < ctf_outstanding(tp)) {
10676 /* The peer collapsed the window */
10677 rack_collapsed_window(rack);
10678 } else if (rack->rc_has_collapsed)
10679 rack_un_collapse_window(rack);
10682 * Pull snd_wl2 up to prevent seq wrap relative to th_ack.
10684 tp->snd_wl2 = th->th_ack;
10687 /* ND6_HINT(tp); *//* Some progress has been made. */
10690 * If all outstanding data are acked, stop retransmit timer,
10691 * otherwise restart timer using current (possibly backed-off)
10692 * value. If process is waiting for space, wakeup/selwakeup/signal.
10693 * If data are ready to send, let tcp_output decide between more
10694 * output or persist.
10697 if (so->so_options & SO_DEBUG)
10698 tcp_trace(TA_INPUT, ostate, tp,
10699 (void *)tcp_saveipgen,
10702 if (under_pacing &&
10703 (rack->use_fixed_rate == 0) &&
10704 (rack->in_probe_rtt == 0) &&
10705 rack->rc_gp_dyn_mul &&
10706 rack->rc_always_pace) {
10707 /* Check if we are dragging bottom */
10708 rack_check_bottom_drag(tp, rack, so, acked);
10710 if (tp->snd_una == tp->snd_max) {
10711 tp->t_flags &= ~TF_PREVVALID;
10712 rack->r_ctl.retran_during_recovery = 0;
10713 rack->r_ctl.dsack_byte_cnt = 0;
10714 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
10715 if (rack->r_ctl.rc_went_idle_time == 0)
10716 rack->r_ctl.rc_went_idle_time = 1;
10717 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
10718 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
10720 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
10722 if (acked && rack->r_fast_output)
10723 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked);
10724 if (sbavail(&so->so_snd)) {
10725 rack->r_wanted_output = 1;
10731 * Return value of 1, the TCB is unlocked and most
10732 * likely gone, return value of 0, the TCP is still locked.
10736 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
10737 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10738 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
10740 int32_t ret_val = 0;
10742 int32_t ourfinisacked = 0;
10743 struct tcp_rack *rack;
10745 ctf_calc_rwin(so, tp);
10747 * If the state is SYN_SENT: if seg contains an ACK, but not for our
10748 * SYN, drop the input. if seg contains a RST, then drop the
10749 * connection. if seg does not contain SYN, then drop it. Otherwise
10750 * this is an acceptable SYN segment initialize tp->rcv_nxt and
10751 * tp->irs if seg contains ack then advance tp->snd_una if seg
10752 * contains an ECE and ECN support is enabled, the stream is ECN
10753 * capable. if SYN has been acked change to ESTABLISHED else
10754 * SYN_RCVD state arrange for segment to be acked (eventually)
10755 * continue processing rest of data/controls.
10757 if ((thflags & TH_ACK) &&
10758 (SEQ_LEQ(th->th_ack, tp->iss) ||
10759 SEQ_GT(th->th_ack, tp->snd_max))) {
10760 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
10761 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10764 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
10765 TCP_PROBE5(connect__refused, NULL, tp,
10766 mtod(m, const char *), tp, th);
10767 tp = tcp_drop(tp, ECONNREFUSED);
10768 ctf_do_drop(m, tp);
10771 if (thflags & TH_RST) {
10772 ctf_do_drop(m, tp);
10775 if (!(thflags & TH_SYN)) {
10776 ctf_do_drop(m, tp);
10779 tp->irs = th->th_seq;
10780 tcp_rcvseqinit(tp);
10781 rack = (struct tcp_rack *)tp->t_fb_ptr;
10782 if (thflags & TH_ACK) {
10783 int tfo_partial = 0;
10785 KMOD_TCPSTAT_INC(tcps_connects);
10788 mac_socketpeer_set_from_mbuf(m, so);
10790 /* Do window scaling on this connection? */
10791 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
10792 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
10793 tp->rcv_scale = tp->request_r_scale;
10795 tp->rcv_adv += min(tp->rcv_wnd,
10796 TCP_MAXWIN << tp->rcv_scale);
10798 * If not all the data that was sent in the TFO SYN
10799 * has been acked, resend the remainder right away.
10801 if (IS_FASTOPEN(tp->t_flags) &&
10802 (tp->snd_una != tp->snd_max)) {
10803 tp->snd_nxt = th->th_ack;
10807 * If there's data, delay ACK; if there's also a FIN ACKNOW
10808 * will be turned on later.
10810 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) {
10811 rack_timer_cancel(tp, rack,
10812 rack->r_ctl.rc_rcvtime, __LINE__);
10813 tp->t_flags |= TF_DELACK;
10815 rack->r_wanted_output = 1;
10816 tp->t_flags |= TF_ACKNOW;
10817 rack->rc_dack_toggle = 0;
10819 if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) &&
10820 (V_tcp_do_ecn == 1)) {
10821 tp->t_flags2 |= TF2_ECN_PERMIT;
10822 KMOD_TCPSTAT_INC(tcps_ecn_shs);
10824 if (SEQ_GT(th->th_ack, tp->snd_una)) {
10826 * We advance snd_una for the
10827 * fast open case. If th_ack is
10828 * acknowledging data beyond
10829 * snd_una we can't just call
10830 * ack-processing since the
10831 * data stream in our send-map
10832 * will start at snd_una + 1 (one
10833 * beyond the SYN). If it's just
10834 * equal we don't need to do that
10835 * and there is no send_map.
10840 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions:
10841 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1
10843 tp->t_starttime = ticks;
10844 if (tp->t_flags & TF_NEEDFIN) {
10845 tcp_state_change(tp, TCPS_FIN_WAIT_1);
10846 tp->t_flags &= ~TF_NEEDFIN;
10847 thflags &= ~TH_SYN;
10849 tcp_state_change(tp, TCPS_ESTABLISHED);
10850 TCP_PROBE5(connect__established, NULL, tp,
10851 mtod(m, const char *), tp, th);
10852 rack_cc_conn_init(tp);
10856 * Received initial SYN in SYN-SENT[*] state => simultaneous
10857 * open. If segment contains CC option and there is a
10858 * cached CC, apply TAO test. If it succeeds, connection is *
10859 * half-synchronized. Otherwise, do 3-way handshake:
10860 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If
10861 * there was no CC option, clear cached CC value.
10863 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
10864 tcp_state_change(tp, TCPS_SYN_RECEIVED);
10866 INP_WLOCK_ASSERT(tp->t_inpcb);
10868 * Advance th->th_seq to correspond to first data byte. If data,
10869 * trim to stay within window, dropping FIN if necessary.
10872 if (tlen > tp->rcv_wnd) {
10873 todrop = tlen - tp->rcv_wnd;
10875 tlen = tp->rcv_wnd;
10876 thflags &= ~TH_FIN;
10877 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin);
10878 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
10880 tp->snd_wl1 = th->th_seq - 1;
10881 tp->rcv_up = th->th_seq;
10883 * Client side of transaction: already sent SYN and data. If the
10884 * remote host used T/TCP to validate the SYN, our data will be
10885 * ACK'd; if so, enter normal data segment processing in the middle
10886 * of step 5, ack processing. Otherwise, goto step 6.
10888 if (thflags & TH_ACK) {
10889 /* For syn-sent we need to possibly update the rtt */
10890 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
10893 mcts = tcp_ts_getticks();
10894 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
10895 if (!tp->t_rttlow || tp->t_rttlow > t)
tp->t_rttlow = t;
10897 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4);
10898 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2);
10899 tcp_rack_xmit_timer_commit(rack, tp);
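/*
* For instance (hypothetical tick values, assuming
* HPTS_USEC_IN_MSEC == 1000): a tick clock of 1003 with an echoed
* tsecr of 1000 yields t = 3 * 1000 = 3000 usecs, which seeds
* t_rttlow and the rack RTT machinery with this syn-sent sample.
*/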
10901 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val))
10903 /* We may have changed to FIN_WAIT_1 above */
10904 if (tp->t_state == TCPS_FIN_WAIT_1) {
10906 * In FIN_WAIT_1 STATE in addition to the processing
10907 * for the ESTABLISHED state if our FIN is now
10908 * acknowledged then enter FIN_WAIT_2.
10910 if (ourfinisacked) {
10912 * If we can't receive any more data, then
10913 * closing user can proceed. Starting the
10914 * timer is contrary to the specification,
10915 * but if we don't get a FIN we'll hang
10918 * XXXjl: we should release the tp also, and
10919 * use a compressed state.
10921 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
10922 soisdisconnected(so);
10923 tcp_timer_activate(tp, TT_2MSL,
10924 (tcp_fast_finwait2_recycle ?
10925 tcp_finwait2_timeout :
10928 tcp_state_change(tp, TCPS_FIN_WAIT_2);
10932 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
10933 tiwin, thflags, nxt_pkt));
10937 * Return value of 1, the TCB is unlocked and most
10938 * likely gone, return value of 0, the TCP is still locked.
10942 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
10943 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10944 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
10946 struct tcp_rack *rack;
10947 int32_t ret_val = 0;
10948 int32_t ourfinisacked = 0;
10950 ctf_calc_rwin(so, tp);
10951 if ((thflags & TH_ACK) &&
10952 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
10953 SEQ_GT(th->th_ack, tp->snd_max))) {
10954 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
10955 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10958 rack = (struct tcp_rack *)tp->t_fb_ptr;
10959 if (IS_FASTOPEN(tp->t_flags)) {
10961 * When a TFO connection is in SYN_RECEIVED, the
10962 * only valid packets are the initial SYN, a
10963 * retransmit/copy of the initial SYN (possibly with
10964 * a subset of the original data), a valid ACK, a FIN, or a RST.
10967 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
10968 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
10969 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10971 } else if (thflags & TH_SYN) {
10972 /* non-initial SYN is ignored */
10973 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) ||
10974 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) ||
10975 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) {
10976 ctf_do_drop(m, NULL);
10979 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) {
10980 ctf_do_drop(m, NULL);
10984 if ((thflags & TH_RST) ||
10985 (tp->t_fin_is_rst && (thflags & TH_FIN)))
10986 return (ctf_process_rst(m, th, so, tp));
10988 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
10989 * it's less than ts_recent, drop it.
10991 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
10992 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
10993 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
10997 * In the SYN-RECEIVED state, validate that the packet belongs to
10998 * this connection before trimming the data to fit the receive
10999 * window. Check the sequence number versus IRS since we know the
11000 * sequence numbers haven't wrapped. This is a partial fix for the
11001 * "LAND" DoS attack.
11003 if (SEQ_LT(th->th_seq, tp->irs)) {
11004 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
11005 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11008 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11009 &rack->r_ctl.challenge_ack_ts,
11010 &rack->r_ctl.challenge_ack_cnt)) {
11014 * If last ACK falls within this segment's sequence numbers, record
11015 * its timestamp. NOTE: 1) That the test incorporates suggestions
11016 * from the latest proposal of the tcplw@cray.com list (Braden
11017 * 1993/04/26). 2) That updating only on newer timestamps interferes
11018 * with our earlier PAWS tests, so this check should be solely
11019 * predicated on the sequence space of this segment. 3) That we
11020 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11021 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11022 * SEG.Len, This modified check allows us to overcome RFC1323's
11023 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11024 * p.869. In such cases, we can still calculate the RTT correctly
11025 * when RCV.NXT == Last.ACK.Sent.
11027 if ((to->to_flags & TOF_TS) != 0 &&
11028 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11029 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11030 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11031 tp->ts_recent_age = tcp_ts_getticks();
11032 tp->ts_recent = to->to_tsval;
11034 tp->snd_wnd = tiwin;
11035 rack_validate_fo_sendwin_up(tp, rack);
11037 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11038 * is on (half-synchronized state), then queue data for later
11039 * processing; else drop segment and return.
11041 if ((thflags & TH_ACK) == 0) {
11042 if (IS_FASTOPEN(tp->t_flags)) {
rack_cc_conn_init(tp);
}
return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
tiwin, thflags, nxt_pkt));
}
KMOD_TCPSTAT_INC(tcps_connects);
soisconnected(so);
11050 /* Do window scaling? */
11051 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
11052 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
tp->rcv_scale = tp->request_r_scale;
}
/*
 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* ->
 * FIN-WAIT-1
 */
tp->t_starttime = ticks;
11060 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) {
11061 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
tp->t_tfo_pending = NULL;
}
if (tp->t_flags & TF_NEEDFIN) {
tcp_state_change(tp, TCPS_FIN_WAIT_1);
tp->t_flags &= ~TF_NEEDFIN;
} else {
tcp_state_change(tp, TCPS_ESTABLISHED);
11069 TCP_PROBE5(accept__established, NULL, tp,
mtod(m, const char *), tp, th);
/*
 * TFO connections call cc_conn_init() during SYN
11073 * processing. Calling it again here for such connections
11074 * is not harmless as it would undo the snd_cwnd reduction
 * that occurs when a TFO SYN|ACK is retransmitted.
 */
if (!IS_FASTOPEN(tp->t_flags))
rack_cc_conn_init(tp);
}
/*
 * Account for the ACK of our SYN prior to
11082 * regular ACK processing below, except for
 * simultaneous SYN, which is handled later.
 */
if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN))
tp->snd_una++;
/*
 * If segment contains data or ACK, will call tcp_reass() later; if
 * not, do so now to pass queued data to user.
 */
if (tlen == 0 && (thflags & TH_FIN) == 0) {
(void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0,
(struct mbuf *)0);
11094 if (tp->t_flags & TF_WAKESOR) {
11095 tp->t_flags &= ~TF_WAKESOR;
11096 /* NB: sorwakeup_locked() does an implicit unlock. */
sorwakeup_locked(so);
}
}
tp->snd_wl1 = th->th_seq - 1;
11101 /* For syn-recv we need to possibly update the rtt */
if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
uint32_t t, mcts;

mcts = tcp_ts_getticks();
11106 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
if (!tp->t_rttlow || tp->t_rttlow > t)
tp->t_rttlow = t;
rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5);
11110 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2);
tcp_rack_xmit_timer_commit(rack, tp);
}
if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
return (ret_val);
}
if (tp->t_state == TCPS_FIN_WAIT_1) {
/* We could have gone to FIN_WAIT_1 (or EST) above */
/*
 * In FIN_WAIT_1 STATE in addition to the processing for the
 * ESTABLISHED state if our FIN is now acknowledged then
 * enter FIN_WAIT_2.
 */
if (ourfinisacked) {
/*
 * If we can't receive any more data, then closing
 * user can proceed. Starting the timer is contrary
 * to the specification, but if we don't get a FIN
11125 * If we can't receive any more data, then closing
11126 * user can proceed. Starting the timer is contrary
11127 * to the specification, but if we don't get a FIN
 * we'll hang forever.
 *
 * XXXjl: we should release the tp also, and use a
 * compressed state.
 */
if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
11134 soisdisconnected(so);
11135 tcp_timer_activate(tp, TT_2MSL,
11136 (tcp_fast_finwait2_recycle ?
tcp_finwait2_timeout :
TP_MAXIDLE(tp)));
}
tcp_state_change(tp, TCPS_FIN_WAIT_2);
}
}
return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
tiwin, thflags, nxt_pkt));
}
/*
 * Return value of 1: the TCB is unlocked and most
 * likely gone; return value of 0: the TCP is still
 * locked.
 */
static int
rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
11154 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
{
int32_t ret_val = 0;
struct tcp_rack *rack;

/*
 * Header prediction: check for the two common cases of a
11162 * uni-directional data xfer. If the packet has no control flags,
11163 * is in-sequence, the window didn't change and we're not
11164 * retransmitting, it's a candidate. If the length is zero and the
11165 * ack moved forward, we're the sender side of the xfer. Just free
11166 * the data acked & wake any higher level process that was blocked
11167 * waiting for space. If the length is non-zero and the ack didn't
11168 * move, we're the receiver side. If we're getting packets in-order
 * (the reassembly queue is empty), add the data to the socket
 * buffer and note that we need a delayed ack. Make sure that the
 * hidden state-flags are also off. Since we check for
 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN.
 */
rack = (struct tcp_rack *)tp->t_fb_ptr;
11175 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) &&
11176 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) &&
11177 __predict_true(SEGQ_EMPTY(tp)) &&
__predict_true(th->th_seq == tp->rcv_nxt)) {
if (tlen == 0) {
if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen,
tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) {
return (0);
}
} else {
if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen,
tiwin, nxt_pkt, iptos)) {
return (0);
}
}
}
ctf_calc_rwin(so, tp);
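/*
 * Note: __predict_true() is FreeBSD's branch-prediction hint (a
 * wrapper around the compiler's __builtin_expect()), so the
 * no-SACK, plain-ACK, in-sequence case above is laid out as the
 * fall-through fast path; it is an annotation for the compiler,
 * not a functional check.
 */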
11193 if ((thflags & TH_RST) ||
11194 (tp->t_fin_is_rst && (thflags & TH_FIN)))
return (ctf_process_rst(m, th, so, tp));

/*
 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 * synchronized state.
 */
if (thflags & TH_SYN) {
ctf_challenge_ack(m, th, tp, &ret_val);
return (ret_val);
}
/*
 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 * it's less than ts_recent, drop it.
 */
if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11210 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
return (ret_val);
}
if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11215 &rack->r_ctl.challenge_ack_ts,
&rack->r_ctl.challenge_ack_cnt)) {
return (ret_val);
}
/*
 * If last ACK falls within this segment's sequence numbers, record
11221 * its timestamp. NOTE: 1) That the test incorporates suggestions
11222 * from the latest proposal of the tcplw@cray.com list (Braden
11223 * 1993/04/26). 2) That updating only on newer timestamps interferes
11224 * with our earlier PAWS tests, so this check should be solely
11225 * predicated on the sequence space of this segment. 3) That we
11226 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11227 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
 * SEG.Len. This modified check allows us to overcome RFC1323's
 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 * p.869. In such cases, we can still calculate the RTT correctly
 * when RCV.NXT == Last.ACK.Sent.
 */
if ((to->to_flags & TOF_TS) != 0 &&
11234 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11235 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11236 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11237 tp->ts_recent_age = tcp_ts_getticks();
tp->ts_recent = to->to_tsval;
}
/*
 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11242 * is on (half-synchronized state), then queue data for later
 * processing; else drop segment and return.
 */
if ((thflags & TH_ACK) == 0) {
11246 if (tp->t_flags & TF_NEEDSYN) {
11247 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11248 tiwin, thflags, nxt_pkt));
11250 } else if (tp->t_flags & TF_ACKNOW) {
11251 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
return (ret_val);
} else {
ctf_do_drop(m, NULL);
return (0);
}
}
/*
 * Ack processing.
 */
if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
return (ret_val);
}
if (sbavail(&so->so_snd)) {
11266 if (ctf_progress_timeout_check(tp, true)) {
11267 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
11268 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
return (1);
}
}
/* State changes only happen in rack_process_data() */
11274 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
tiwin, thflags, nxt_pkt));
}

/*
 * Return value of 1: the TCB is unlocked and most
 * likely gone; return value of 0: the TCP is still
 * locked.
 */
static int
rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
11285 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
{
int32_t ret_val = 0;
11289 struct tcp_rack *rack;
11291 rack = (struct tcp_rack *)tp->t_fb_ptr;
11292 ctf_calc_rwin(so, tp);
11293 if ((thflags & TH_RST) ||
11294 (tp->t_fin_is_rst && (thflags & TH_FIN)))
return (ctf_process_rst(m, th, so, tp));
/*
 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 * synchronized state.
 */
if (thflags & TH_SYN) {
ctf_challenge_ack(m, th, tp, &ret_val);
return (ret_val);
}
/*
 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 * it's less than ts_recent, drop it.
 */
if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11309 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
return (ret_val);
}
if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11314 &rack->r_ctl.challenge_ack_ts,
&rack->r_ctl.challenge_ack_cnt)) {
return (ret_val);
}
/*
 * If last ACK falls within this segment's sequence numbers, record
11320 * its timestamp. NOTE: 1) That the test incorporates suggestions
11321 * from the latest proposal of the tcplw@cray.com list (Braden
11322 * 1993/04/26). 2) That updating only on newer timestamps interferes
11323 * with our earlier PAWS tests, so this check should be solely
11324 * predicated on the sequence space of this segment. 3) That we
11325 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11326 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
 * SEG.Len. This modified check allows us to overcome RFC1323's
 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 * p.869. In such cases, we can still calculate the RTT correctly
 * when RCV.NXT == Last.ACK.Sent.
 */
if ((to->to_flags & TOF_TS) != 0 &&
11333 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11334 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11335 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11336 tp->ts_recent_age = tcp_ts_getticks();
tp->ts_recent = to->to_tsval;
}
/*
 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11341 * is on (half-synchronized state), then queue data for later
 * processing; else drop segment and return.
 */
if ((thflags & TH_ACK) == 0) {
11345 if (tp->t_flags & TF_NEEDSYN) {
11346 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11347 tiwin, thflags, nxt_pkt));
11349 } else if (tp->t_flags & TF_ACKNOW) {
11350 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
return (ret_val);
} else {
ctf_do_drop(m, NULL);
return (0);
}
}
/*
 * Ack processing.
 */
if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
return (ret_val);
}
11364 if (sbavail(&so->so_snd)) {
11365 if (ctf_progress_timeout_check(tp, true)) {
11366 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11367 tp, tick, PROGRESS_DROP, __LINE__);
11368 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
return (1);
}
}
return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
tiwin, thflags, nxt_pkt));
}

static int
rack_check_data_after_close(struct mbuf *m,
struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so)
{
struct tcp_rack *rack;
11383 rack = (struct tcp_rack *)tp->t_fb_ptr;
if (rack->rc_allow_data_af_clo == 0) {
close_now:
tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
11387 /* tcp_close will kill the inp pre-log the Reset */
11388 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
11389 tp = tcp_close(tp);
11390 KMOD_TCPSTAT_INC(tcps_rcvafterclose);
ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen));
return (1);
}
if (sbavail(&so->so_snd) == 0)
goto close_now;
/* Ok we allow data that is ignored and a followup reset */
11397 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
11398 tp->rcv_nxt = th->th_seq + *tlen;
11399 tp->t_flags2 |= TF2_DROP_AF_DATA;
rack->r_wanted_output = 1;
*tlen = 0;
return (0);
}
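/*
 * Policy note: rc_allow_data_af_clo is seeded from the
 * rack_ignore_data_after_close sysctl in rack_init(). When set,
 * post-close data is swallowed (TF2_DROP_AF_DATA) with a reset
 * following on output, rather than the immediate tcp_close() plus
 * drop-with-reset taken above.
 */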
/*
 * Return value of 1: the TCB is unlocked and most
 * likely gone; return value of 0: the TCP is still
 * locked.
 */
static int
rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
11412 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
{
int32_t ret_val = 0;
11416 int32_t ourfinisacked = 0;
11417 struct tcp_rack *rack;
11419 rack = (struct tcp_rack *)tp->t_fb_ptr;
11420 ctf_calc_rwin(so, tp);
11422 if ((thflags & TH_RST) ||
11423 (tp->t_fin_is_rst && (thflags & TH_FIN)))
return (ctf_process_rst(m, th, so, tp));
/*
 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 * synchronized state.
 */
if (thflags & TH_SYN) {
ctf_challenge_ack(m, th, tp, &ret_val);
return (ret_val);
}
/*
 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 * it's less than ts_recent, drop it.
 */
if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11438 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
return (ret_val);
}
if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11443 &rack->r_ctl.challenge_ack_ts,
&rack->r_ctl.challenge_ack_cnt)) {
return (ret_val);
}
/*
 * If new data are received on a connection after the user processes
 * are gone, then RST the other end.
 */
if ((so->so_state & SS_NOFDREF) && tlen) {
if (rack_check_data_after_close(m, tp, &tlen, th, so))
return (1);
}
/*
 * If last ACK falls within this segment's sequence numbers, record
11457 * its timestamp. NOTE: 1) That the test incorporates suggestions
11458 * from the latest proposal of the tcplw@cray.com list (Braden
11459 * 1993/04/26). 2) That updating only on newer timestamps interferes
11460 * with our earlier PAWS tests, so this check should be solely
11461 * predicated on the sequence space of this segment. 3) That we
11462 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11463 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
 * SEG.Len. This modified check allows us to overcome RFC1323's
 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 * p.869. In such cases, we can still calculate the RTT correctly
 * when RCV.NXT == Last.ACK.Sent.
 */
if ((to->to_flags & TOF_TS) != 0 &&
11470 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11471 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11472 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11473 tp->ts_recent_age = tcp_ts_getticks();
tp->ts_recent = to->to_tsval;
}
/*
 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11478 * is on (half-synchronized state), then queue data for later
 * processing; else drop segment and return.
 */
if ((thflags & TH_ACK) == 0) {
11482 if (tp->t_flags & TF_NEEDSYN) {
11483 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11484 tiwin, thflags, nxt_pkt));
11485 } else if (tp->t_flags & TF_ACKNOW) {
11486 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
return (ret_val);
} else {
ctf_do_drop(m, NULL);
return (0);
}
}
/*
 * Ack processing.
 */
if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
return (ret_val);
}
if (ourfinisacked) {
/*
 * If we can't receive any more data, then closing user can
11503 * proceed. Starting the timer is contrary to the
 * specification, but if we don't get a FIN we'll hang
 * forever.
 *
 * XXXjl: we should release the tp also, and use a
 * compressed state.
 */
if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
11511 soisdisconnected(so);
11512 tcp_timer_activate(tp, TT_2MSL,
11513 (tcp_fast_finwait2_recycle ?
tcp_finwait2_timeout :
TP_MAXIDLE(tp)));
}
tcp_state_change(tp, TCPS_FIN_WAIT_2);
}
if (sbavail(&so->so_snd)) {
11520 if (ctf_progress_timeout_check(tp, true)) {
11521 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11522 tp, tick, PROGRESS_DROP, __LINE__);
11523 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
return (1);
}
}
return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
tiwin, thflags, nxt_pkt));
}

/*
 * Return value of 1: the TCB is unlocked and most
 * likely gone; return value of 0: the TCP is still
 * locked.
 */
static int
rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
11539 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
{
int32_t ret_val = 0;
11543 int32_t ourfinisacked = 0;
11544 struct tcp_rack *rack;
11546 rack = (struct tcp_rack *)tp->t_fb_ptr;
11547 ctf_calc_rwin(so, tp);
11549 if ((thflags & TH_RST) ||
11550 (tp->t_fin_is_rst && (thflags & TH_FIN)))
return (ctf_process_rst(m, th, so, tp));
/*
 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 * synchronized state.
 */
if (thflags & TH_SYN) {
ctf_challenge_ack(m, th, tp, &ret_val);
return (ret_val);
}
/*
 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 * it's less than ts_recent, drop it.
 */
if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11565 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
return (ret_val);
}
if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11570 &rack->r_ctl.challenge_ack_ts,
&rack->r_ctl.challenge_ack_cnt)) {
return (ret_val);
}
/*
 * If new data are received on a connection after the user processes
 * are gone, then RST the other end.
 */
if ((so->so_state & SS_NOFDREF) && tlen) {
if (rack_check_data_after_close(m, tp, &tlen, th, so))
return (1);
}
/*
 * If last ACK falls within this segment's sequence numbers, record
11584 * its timestamp. NOTE: 1) That the test incorporates suggestions
11585 * from the latest proposal of the tcplw@cray.com list (Braden
11586 * 1993/04/26). 2) That updating only on newer timestamps interferes
11587 * with our earlier PAWS tests, so this check should be solely
11588 * predicated on the sequence space of this segment. 3) That we
11589 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11590 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
 * SEG.Len. This modified check allows us to overcome RFC1323's
 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 * p.869. In such cases, we can still calculate the RTT correctly
 * when RCV.NXT == Last.ACK.Sent.
 */
if ((to->to_flags & TOF_TS) != 0 &&
11597 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11598 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11599 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11600 tp->ts_recent_age = tcp_ts_getticks();
tp->ts_recent = to->to_tsval;
}
/*
 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11605 * is on (half-synchronized state), then queue data for later
 * processing; else drop segment and return.
 */
if ((thflags & TH_ACK) == 0) {
11609 if (tp->t_flags & TF_NEEDSYN) {
11610 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11611 tiwin, thflags, nxt_pkt));
11612 } else if (tp->t_flags & TF_ACKNOW) {
11613 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
return (ret_val);
} else {
ctf_do_drop(m, NULL);
return (0);
}
}
/*
 * Ack processing.
 */
if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
return (ret_val);
}
if (ourfinisacked) {
tcp_twstart(tp);
m_freem(m);
return (1);
}
if (sbavail(&so->so_snd)) {
11633 if (ctf_progress_timeout_check(tp, true)) {
11634 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11635 tp, tick, PROGRESS_DROP, __LINE__);
11636 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
return (1);
}
}
return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
tiwin, thflags, nxt_pkt));
}

/*
 * Return value of 1: the TCB is unlocked and most
 * likely gone; return value of 0: the TCP is still
 * locked.
 */
static int
rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
11652 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
{
int32_t ret_val = 0;
11656 int32_t ourfinisacked = 0;
11657 struct tcp_rack *rack;
11659 rack = (struct tcp_rack *)tp->t_fb_ptr;
11660 ctf_calc_rwin(so, tp);
11662 if ((thflags & TH_RST) ||
11663 (tp->t_fin_is_rst && (thflags & TH_FIN)))
return (ctf_process_rst(m, th, so, tp));
/*
 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 * synchronized state.
 */
if (thflags & TH_SYN) {
ctf_challenge_ack(m, th, tp, &ret_val);
return (ret_val);
}
/*
 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 * it's less than ts_recent, drop it.
 */
if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11678 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
return (ret_val);
}
if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11683 &rack->r_ctl.challenge_ack_ts,
&rack->r_ctl.challenge_ack_cnt)) {
return (ret_val);
}
/*
 * If new data are received on a connection after the user processes
 * are gone, then RST the other end.
 */
if ((so->so_state & SS_NOFDREF) && tlen) {
if (rack_check_data_after_close(m, tp, &tlen, th, so))
return (1);
}
/*
 * If last ACK falls within this segment's sequence numbers, record
11697 * its timestamp. NOTE: 1) That the test incorporates suggestions
11698 * from the latest proposal of the tcplw@cray.com list (Braden
11699 * 1993/04/26). 2) That updating only on newer timestamps interferes
11700 * with our earlier PAWS tests, so this check should be solely
11701 * predicated on the sequence space of this segment. 3) That we
11702 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11703 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
 * SEG.Len. This modified check allows us to overcome RFC1323's
 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 * p.869. In such cases, we can still calculate the RTT correctly
 * when RCV.NXT == Last.ACK.Sent.
 */
if ((to->to_flags & TOF_TS) != 0 &&
11710 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11711 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11712 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11713 tp->ts_recent_age = tcp_ts_getticks();
tp->ts_recent = to->to_tsval;
}
/*
 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11718 * is on (half-synchronized state), then queue data for later
 * processing; else drop segment and return.
 */
if ((thflags & TH_ACK) == 0) {
11722 if (tp->t_flags & TF_NEEDSYN) {
11723 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11724 tiwin, thflags, nxt_pkt));
11725 } else if (tp->t_flags & TF_ACKNOW) {
11726 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
return (ret_val);
} else {
ctf_do_drop(m, NULL);
return (0);
}
}
/*
 * case TCPS_LAST_ACK: Ack processing.
 */
if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
return (ret_val);
}
if (ourfinisacked) {
11741 tp = tcp_close(tp);
ctf_do_drop(m, tp);
return (1);
}
if (sbavail(&so->so_snd)) {
11746 if (ctf_progress_timeout_check(tp, true)) {
11747 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11748 tp, tick, PROGRESS_DROP, __LINE__);
11749 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
return (1);
}
}
return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
tiwin, thflags, nxt_pkt));
}

/*
 * Return value of 1: the TCB is unlocked and most
 * likely gone; return value of 0: the TCP is still
 * locked.
 */
static int
rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
11765 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
{
int32_t ret_val = 0;
11769 int32_t ourfinisacked = 0;
11770 struct tcp_rack *rack;
11772 rack = (struct tcp_rack *)tp->t_fb_ptr;
11773 ctf_calc_rwin(so, tp);
11775 /* Reset receive buffer auto scaling when not in bulk receive mode. */
11776 if ((thflags & TH_RST) ||
11777 (tp->t_fin_is_rst && (thflags & TH_FIN)))
return (ctf_process_rst(m, th, so, tp));
/*
 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 * synchronized state.
 */
if (thflags & TH_SYN) {
ctf_challenge_ack(m, th, tp, &ret_val);
return (ret_val);
}
/*
 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 * it's less than ts_recent, drop it.
 */
if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11792 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
return (ret_val);
}
if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11797 &rack->r_ctl.challenge_ack_ts,
&rack->r_ctl.challenge_ack_cnt)) {
return (ret_val);
}
/*
 * If new data are received on a connection after the user processes
 * are gone, then RST the other end.
 */
if ((so->so_state & SS_NOFDREF) &&
tlen) {
if (rack_check_data_after_close(m, tp, &tlen, th, so))
return (1);
}
/*
 * If last ACK falls within this segment's sequence numbers, record
11812 * its timestamp. NOTE: 1) That the test incorporates suggestions
11813 * from the latest proposal of the tcplw@cray.com list (Braden
11814 * 1993/04/26). 2) That updating only on newer timestamps interferes
11815 * with our earlier PAWS tests, so this check should be solely
11816 * predicated on the sequence space of this segment. 3) That we
11817 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11818 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
 * SEG.Len. This modified check allows us to overcome RFC1323's
 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 * p.869. In such cases, we can still calculate the RTT correctly
 * when RCV.NXT == Last.ACK.Sent.
 */
if ((to->to_flags & TOF_TS) != 0 &&
11825 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11826 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11827 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11828 tp->ts_recent_age = tcp_ts_getticks();
tp->ts_recent = to->to_tsval;
}
/*
 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11833 * is on (half-synchronized state), then queue data for later
 * processing; else drop segment and return.
 */
if ((thflags & TH_ACK) == 0) {
11837 if (tp->t_flags & TF_NEEDSYN) {
11838 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11839 tiwin, thflags, nxt_pkt));
11840 } else if (tp->t_flags & TF_ACKNOW) {
11841 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
return (ret_val);
} else {
ctf_do_drop(m, NULL);
return (0);
}
}
/*
 * Ack processing.
 */
if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
return (ret_val);
}
11855 if (sbavail(&so->so_snd)) {
11856 if (ctf_progress_timeout_check(tp, true)) {
11857 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11858 tp, tick, PROGRESS_DROP, __LINE__);
11859 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
return (1);
}
}
return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
tiwin, thflags, nxt_pkt));
}

static void
rack_clear_rate_sample(struct tcp_rack *rack)
{
rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY;
11872 rack->r_ctl.rack_rs.rs_rtt_cnt = 0;
rack->r_ctl.rack_rs.rs_rtt_tot = 0;
}

static void
rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override)
{
uint64_t bw_est, rate_wanted;
int chged = 0;
uint32_t user_max, orig_min, orig_max;
11883 orig_min = rack->r_ctl.rc_pace_min_segs;
11884 orig_max = rack->r_ctl.rc_pace_max_segs;
11885 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs;
if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs)
chged = 1;
rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp);
11889 if (rack->use_fixed_rate || rack->rc_force_max_seg) {
if (user_max != rack->r_ctl.rc_pace_max_segs)
chged = 1;
}
if (rack->rc_force_max_seg) {
11894 rack->r_ctl.rc_pace_max_segs = user_max;
11895 } else if (rack->use_fixed_rate) {
11896 bw_est = rack_get_bw(rack);
11897 if ((rack->r_ctl.crte == NULL) ||
11898 (bw_est != rack->r_ctl.crte->rate)) {
rack->r_ctl.rc_pace_max_segs = user_max;
} else {
/* We are pacing right at the hardware rate */
uint32_t segsiz;

segsiz = min(ctf_fixed_maxseg(tp),
11905 rack->r_ctl.rc_pace_min_segs);
11906 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(
11907 tp, bw_est, segsiz, 0,
rack->r_ctl.crte, NULL);
}
} else if (rack->rc_always_pace) {
11911 if (rack->r_ctl.gp_bw ||
11912 #ifdef NETFLIX_PEAKRATE
rack->rc_tp->t_maxpeakrate ||
#endif
rack->r_ctl.init_rate) {
/* We have a rate of some sort set */
uint32_t orig;

bw_est = rack_get_bw(rack);
orig = rack->r_ctl.rc_pace_max_segs;
if (fill_override)
rate_wanted = *fill_override;
else
rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL);
if (rate_wanted) {
/* We have something */
rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack,
rate_wanted,
ctf_fixed_maxseg(rack->rc_tp));
} else
rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs;
if (orig != rack->r_ctl.rc_pace_max_segs)
chged = 1;
} else if ((rack->r_ctl.gp_bw == 0) &&
(rack->r_ctl.rc_pace_max_segs == 0)) {
/*
 * If we have nothing, limit us to bursting
 * out IW sized pieces.
 */
chged = 1;
rack->r_ctl.rc_pace_max_segs = rc_init_window(rack);
}
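/*
 * Worked example (illustrative numbers only): with a 1448-byte
 * MSS and rc_user_set_max_segs of 40, a forced max burst is
 * 40 * 1448 = 57,920 bytes, which at an estimated 1 Gb/s drains
 * in roughly 460 usec. The PACE_MAX_IP_BYTES clamp below then
 * keeps any single paced send under the IP datagram length limit
 * regardless of what the rate math produced.
 */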
if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) {
chged = 1;
rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES;
}
if (chged)
rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2);
}

static void
rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack)
{
#ifdef INET6
struct ip6_hdr *ip6 = NULL;
#endif
#ifdef INET
struct ip *ip = NULL;
#endif
struct udphdr *udp = NULL;

/* Ok let's fill in the fast block; it can only be used with no IP options! */
#ifdef INET6
if (rack->r_is_v6) {
11967 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
if (tp->t_port) {
rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
11971 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
11972 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
11973 udp->uh_dport = tp->t_port;
11974 rack->r_ctl.fsb.udp = udp;
rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
} else {
rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1);
rack->r_ctl.fsb.udp = NULL;
}
tcpip_fillheaders(rack->rc_inp,
tp->t_port,
ip6, rack->r_ctl.fsb.th);
} else
#endif
{
rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr);
ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
if (tp->t_port) {
rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
11991 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
11992 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
11993 udp->uh_dport = tp->t_port;
11994 rack->r_ctl.fsb.udp = udp;
rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
} else {
rack->r_ctl.fsb.udp = NULL;
rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1);
}
tcpip_fillheaders(rack->rc_inp,
tp->t_port,
ip, rack->r_ctl.fsb.th);
}
rack->r_fsb_inited = 1;
}

static int
rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack)
{
/*
 * Allocate the larger of spaces: V6 if available, else just
 * V4, and include udphdr (overbook).
 */
#ifdef INET6
rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr);
#else
rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr);
#endif
rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len,
M_TCPFSB, M_NOWAIT|M_ZERO);
if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) {
return (ENOMEM);
}
rack->r_fsb_inited = 0;
return (0);
}
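/*
 * Layout sketch of the prebuilt fast-send block (fsb) that
 * rack_init_fsb_block() fills in, shown for the v4-over-UDP
 * tunneling case:
 *
 *	tcp_ip_hdr -> [ struct ip ][ struct udphdr ][ struct tcphdr ]
 *	                            ^ fsb.udp        ^ fsb.th
 *
 * Without a UDP tunnel port the udphdr is simply absent and
 * fsb.udp stays NULL; the buffer is sized for the larger v6 case
 * up front so either family fits.
 */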
rack_init(struct tcpcb *tp)
{
struct tcp_rack *rack = NULL;
struct rack_sendmap *insret;
uint32_t iwin, snt, us_cts;
int err;

tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT);
if (tp->t_fb_ptr == NULL) {
/*
 * We need to allocate memory but can't. The INP and INP_INFO
 * locks are held and they are recursive (this happens during
 * setup), so a scheme to drop the locks fails :(
 */
return (ENOMEM);
}
memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack));
12049 rack = (struct tcp_rack *)tp->t_fb_ptr;
12050 RB_INIT(&rack->r_ctl.rc_mtree);
12051 TAILQ_INIT(&rack->r_ctl.rc_free);
12052 TAILQ_INIT(&rack->r_ctl.rc_tmap);
12054 rack->rc_inp = tp->t_inpcb;
12056 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
12057 /* Probably not needed but lets be sure */
rack_clear_rate_sample(rack);
/*
 * Save off the default values, socket options will poke
12061 * at these if pacing is not on or we have not yet
12062 * reached where pacing is on (gp_ready/fixed enabled).
12063 * When they get set into the CC module (when gp_ready
12064 * is enabled or we enable fixed) then we will set these
12065 * values into the CC and place in here the old values
12066 * so we have a restoral. Then we will set the flag
12067 * rc_pacing_cc_set. That way whenever we turn off pacing
12068 * or switch off this stack, we will know to go restore
 * the saved values.
 */
rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn;
12072 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn;
12073 /* We want abe like behavior as well */
12074 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN;
12075 rack->r_ctl.rc_reorder_fade = rack_reorder_fade;
12076 rack->rc_allow_data_af_clo = rack_ignore_data_after_close;
12077 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh;
12079 rack->use_rack_rr = 1;
12080 if (V_tcp_delack_enabled)
tp->t_delayed_ack = 1;
else
tp->t_delayed_ack = 0;
12084 #ifdef TCP_ACCOUNTING
12085 if (rack_tcp_accounting) {
tp->t_flags2 |= TF2_TCP_ACCOUNTING;
}
#endif
if (rack_enable_shared_cwnd)
12090 rack->rack_enable_scwnd = 1;
12091 rack->rc_user_set_max_segs = rack_hptsi_segments;
12092 rack->rc_force_max_seg = 0;
12093 if (rack_use_imac_dack)
12094 rack->rc_dack_mode = 1;
12095 TAILQ_INIT(&rack->r_ctl.opt_list);
12096 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh;
12097 rack->r_ctl.rc_pkt_delay = rack_pkt_delay;
12098 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp;
12099 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff;
12100 rack->r_ctl.rc_highest_us_rtt = 0;
12101 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap;
12102 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop);
12103 if (rack_use_cmp_acks)
12104 rack->r_use_cmp_ack = 1;
12105 if (rack_disable_prr)
12106 rack->rack_no_prr = 1;
12107 if (rack_gp_no_rec_chg)
12108 rack->rc_gp_no_rec_chg = 1;
12109 if (rack_pace_every_seg && tcp_can_enable_pacing()) {
12110 rack->rc_always_pace = 1;
12111 if (rack->use_fixed_rate || rack->gp_ready)
rack_set_cc_pacing(rack);
} else
rack->rc_always_pace = 0;
12115 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack)
rack->r_mbuf_queue = 1;
else
rack->r_mbuf_queue = 0;
12119 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
else
tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
12123 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12124 if (rack_limits_scwnd)
rack->r_limit_scw = 1;
else
rack->r_limit_scw = 0;
12128 rack->rc_labc = V_tcp_abc_l_var;
12129 rack->r_ctl.rc_high_rwnd = tp->snd_wnd;
12130 rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
12131 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method;
12132 rack->rack_tlp_threshold_use = rack_tlp_threshold_use;
12133 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr;
12134 rack->r_ctl.rc_min_to = rack_min_to;
12135 microuptime(&rack->r_ctl.act_rcv_time);
12136 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
12137 rack->r_running_late = 0;
12138 rack->r_running_early = 0;
12139 rack->rc_init_win = rack_default_init_window;
12140 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss;
12141 if (rack_hw_up_only)
12142 rack->r_up_only = 1;
12143 if (rack_do_dyn_mul) {
12144 /* When dynamic adjustment is on CA needs to start at 100% */
12145 rack->rc_gp_dyn_mul = 1;
12146 if (rack_do_dyn_mul >= 100)
rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
} else
rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
12150 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec;
12151 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
12152 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
12153 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN,
12154 rack_probertt_filter_life);
12155 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
12156 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
12157 rack->r_ctl.rc_time_of_last_probertt = us_cts;
12158 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks();
12159 rack->r_ctl.rc_time_probertt_starts = 0;
12160 /* We require at least one measurement, even if the sysctl is 0 */
12161 if (rack_req_measurements)
rack->r_ctl.req_measurements = rack_req_measurements;
else
rack->r_ctl.req_measurements = 1;
12165 if (rack_enable_hw_pacing)
12166 rack->rack_hdw_pace_ena = 1;
12167 if (rack_hw_rate_caps)
12168 rack->r_rack_hw_rate_caps = 1;
12169 /* Do we force on detection? */
12170 #ifdef NETFLIX_EXP_DETECTION
12171 if (tcp_force_detection)
rack->do_detection = 1;
else
#endif
rack->do_detection = 0;
12176 if (rack_non_rxt_use_cr)
12177 rack->rack_rec_nonrxt_use_cr = 1;
err = rack_init_fsb(tp, rack);
if (err) {
uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
tp->t_fb_ptr = NULL;
return (err);
}
if (tp->snd_una != tp->snd_max) {
12185 /* Create a send map for the current outstanding data */
12186 struct rack_sendmap *rsm;
rsm = rack_alloc(rack);
if (rsm == NULL) {
uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
tp->t_fb_ptr = NULL;
return (ENOMEM);
}
rsm->r_no_rtt_allowed = 1;
12195 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
12196 rsm->r_rtr_cnt = 1;
12197 rsm->r_rtr_bytes = 0;
12198 if (tp->t_flags & TF_SENTFIN) {
12199 rsm->r_end = tp->snd_max - 1;
rsm->r_flags |= RACK_HAS_FIN;
} else {
rsm->r_end = tp->snd_max;
}
if (tp->snd_una == tp->iss) {
12205 /* The data space is one beyond snd_una */
12206 rsm->r_flags |= RACK_HAS_SYN;
12207 rsm->r_start = tp->iss;
rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una);
} else
rsm->r_start = tp->snd_una;
rsm->r_dupack = 0;
if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) {
rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff);
if (rsm->m)
rsm->orig_m_len = rsm->m->m_len;
else
rsm->orig_m_len = 0;
} else {
/*
 * This can happen if we have a stand-alone FIN or
 * SYN.
 */
rsm->m = NULL;
rsm->orig_m_len = 0;
rsm->soff = 0;
}
insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
#ifdef INVARIANTS
if (insret != NULL) {
panic("Insert in rb tree fails ret:%p rack:%p rsm:%p",
insret, rack, rsm);
}
#endif
TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
rsm->r_in_tmap = 1;
}
/*
 * Timers in Rack are kept in microseconds so lets
12239 * convert any initial incoming variables
12240 * from ticks into usecs. Note that we
12241 * also change the values of t_srtt and t_rttvar, if
12242 * they are non-zero. They are kept with a 5
12243 * bit decimal so we have to carefully convert
 * these to get the full precision.
 */
rack_convert_rtts(tp);
12247 tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow);
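/*
 * Worked example: with hz = 1000 one tick is 1000 usec, so
 * TICKS_2_USEC(3) == 3000 usec for t_rttlow. t_srtt and t_rttvar
 * additionally carry a fixed-point fraction (shifted by
 * TCP_RTT_SHIFT and TCP_RTTVAR_SHIFT respectively), which is why
 * rack_convert_rtts() must shift that scale out before the
 * tick-to-microsecond multiply rather than converting blindly.
 */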
12248 if (rack_def_profile)
12249 rack_set_profile(rack, rack_def_profile);
12250 /* Cancel the GP measurement in progress */
12251 tp->t_flags &= ~TF_GPUTINPROG;
12252 if (SEQ_GT(tp->snd_max, tp->iss))
snt = tp->snd_max - tp->iss;
else
snt = 0;
iwin = rc_init_window(rack);
if (SEQ_LEQ(snt, iwin)) {
/* We are not past the initial window
 * so we need to make sure cwnd is
 * correct.
 */
if (tp->snd_cwnd < iwin)
tp->snd_cwnd = iwin;
/*
 * If we are within the initial window
12266 * we want ssthresh to be unlimited. Setting
12267 * it to the rwnd (which the default stack does
12268 * and older racks) is not really a good idea
12269 * since we want to be in SS and grow both the
12270 * cwnd and the rwnd (via dynamic rwnd growth). If
12271 * we set it to the rwnd then as the peer grows its
 * rwnd we will be stuck in CA and never hit SS.
 *
 * It's far better to raise it up high (this takes the
 * risk that there has been a loss already, probably
12276 * we should have an indicator in all stacks of loss
12277 * but we don't), but considering the normal use this
12278 * is a risk worth taking. The consequences of not
 * hitting SS are far worse than going one more time
 * into it early on (before we have sent even an IW).
12281 * It is highly unlikely that we will have had a loss
 * before getting the IW out.
 */
tp->snd_ssthresh = 0xffffffff;
}
rack_stop_all_timers(tp);
12287 /* Lets setup the fsb block */
12288 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
12289 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur,
__LINE__, RACK_RTTS_INIT);
return (0);
}

static int
rack_handoff_ok(struct tcpcb *tp)
{
12297 if ((tp->t_state == TCPS_CLOSED) ||
12298 (tp->t_state == TCPS_LISTEN)) {
/* Sure no problem though it may not stick */
return (1);
}
if ((tp->t_state == TCPS_SYN_SENT) ||
(tp->t_state == TCPS_SYN_RECEIVED)) {
/*
 * We really don't know if you support sack,
 * you have to get to ESTAB or beyond to tell.
 */
return (1);
}
if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) {
12312 * Rack will only send a FIN after all data is acknowledged.
12313 * So in this case we have more data outstanding. We can't
12314 * switch stacks until either all data and only the FIN
12315 * is left (in which case rack_init() now knows how
12316 * to deal with that) <or> all is acknowledged and we
12317 * are only left with incoming data, though why you
12318 * would want to switch to rack after all data is acknowledged
 * I have no idea (rrs)!
 */
return (0);
}
if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){
return (1);
}
/*
 * If we reach here we don't do SACK on this connection so we can
 * never do rack.
 */
return (0);
}

static void
rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
{
12339 if (tp->t_fb_ptr) {
12340 struct tcp_rack *rack;
12341 struct rack_sendmap *rsm, *nrsm, *rm;
12343 rack = (struct tcp_rack *)tp->t_fb_ptr;
if (tp->t_in_pkt) {
/*
 * It is unsafe to process the packets since a
12347 * reset may be lurking in them (its rare but it
12348 * can occur). If we were to find a RST, then we
12349 * would end up dropping the connection and the
12350 * INP lock, so when we return the caller (tcp_usrreq)
 * will blow up when it tries to unlock the inp.
 */
struct mbuf *save, *m;
m = tp->t_in_pkt;
tp->t_in_pkt = NULL;
tp->t_tail_pkt = NULL;
while (m) {
save = m->m_nextpkt;
m->m_nextpkt = NULL;
m_freem(m);
m = save;
}
12364 if ((tp->t_inpcb) &&
(tp->t_inpcb->inp_flags2 & INP_MBUF_ACKCMP))
tp->t_inpcb->inp_flags2 &= ~INP_MBUF_ACKCMP;
}
/* Total if we used large or small (if ack-cmp was used). */
12369 if (rack->rc_inp->inp_flags2 & INP_MBUF_L_ACKS)
counter_u64_add(rack_large_ackcmp, 1);
else
counter_u64_add(rack_small_ackcmp, 1);
12375 tp->t_flags &= ~TF_FORCEDATA;
12376 #ifdef NETFLIX_SHARED_CWND
if (rack->r_ctl.rc_scw) {
uint32_t limit;

if (rack->r_limit_scw)
limit = max(1, rack->r_ctl.rc_lowest_us_rtt);
else
limit = 0;
tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw,
rack->r_ctl.rc_scw_index,
limit);
rack->r_ctl.rc_scw = NULL;
}
#endif
if (rack->r_ctl.fsb.tcp_ip_hdr) {
12391 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB);
12392 rack->r_ctl.fsb.tcp_ip_hdr = NULL;
rack->r_ctl.fsb.th = NULL;
}
/* Convert back to ticks, with the fractional part preserved. */
if (tp->t_srtt > 1) {
12397 uint32_t val, frac;
12399 val = USEC_2_TICKS(tp->t_srtt);
12400 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz);
tp->t_srtt = val << TCP_RTT_SHIFT;
/*
 * frac is the fractional part that is left
 * over from converting to hz and shifting.
 * We need to convert this to the 5 bit
 * decimal scale.
 */
if (frac) {
if (hz == 1000) {
frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC);
} else {
frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC);
}
tp->t_srtt += frac;
}
}
if (tp->t_rttvar) {
12418 uint32_t val, frac;
12420 val = USEC_2_TICKS(tp->t_rttvar);
frac = tp->t_rttvar % (HPTS_USEC_IN_SEC / hz);
tp->t_rttvar = val << TCP_RTTVAR_SHIFT;
/*
 * frac is the fractional part that is left
 * over from converting to hz and shifting.
 * We need to convert this to the 5 bit
 * decimal scale.
 */
if (frac) {
if (hz == 1000) {
frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC);
} else {
frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC);
}
tp->t_rttvar += frac;
}
}
tp->t_rxtcur = USEC_2_TICKS(tp->t_rxtcur);
12439 tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow);
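/*
 * Worked example (hz = 1000): an srtt of 8500 usec converts to
 * val = 8 ticks with frac = 500 usec left over. After
 * val << TCP_RTT_SHIFT, the leftover becomes
 * 500 * TCP_RTT_SCALE / 1000 = 16, i.e. exactly half a tick in
 * the 5-bit fixed-point representation the default stack expects
 * (8.5 ticks * 32 == 272 total).
 */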
12440 if (rack->rc_always_pace) {
12441 tcp_decrement_paced_conn();
12442 rack_undo_cc_pacing(rack);
12443 rack->rc_always_pace = 0;
12445 /* Clean up any options if they were not applied */
12446 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) {
12447 struct deferred_opt_list *dol;
12449 dol = TAILQ_FIRST(&rack->r_ctl.opt_list);
12450 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
12451 free(dol, M_TCPDO);
12453 /* rack does not use force data but other stacks may clear it */
12454 if (rack->r_ctl.crte != NULL) {
12455 tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
12456 rack->rack_hdrw_pacing = 0;
12457 rack->r_ctl.crte = NULL;
12459 #ifdef TCP_BLACKBOX
tcp_log_flowend(tp);
#endif
RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) {
rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
#ifdef INVARIANTS
if (rm != rsm) {
panic("At fini, rack:%p rsm:%p rm:%p",
rack, rsm, rm);
}
#endif
uma_zfree(rack_zone, rsm);
}
rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
while (rsm) {
TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
12475 uma_zfree(rack_zone, rsm);
rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
}
rack->rc_free_cnt = 0;
12479 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
tp->t_fb_ptr = NULL;
}
tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
12484 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
12485 tp->t_inpcb->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
12486 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_ACKCMP;
12487 /* Cancel the GP measurement in progress */
12488 tp->t_flags &= ~TF_GPUTINPROG;
12489 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_L_ACKS;
12491 /* Make sure snd_nxt is correctly set */
tp->snd_nxt = tp->snd_max;
}

static void
rack_set_state(struct tcpcb *tp, struct tcp_rack *rack)
{
if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) {
rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
}
switch (tp->t_state) {
12502 case TCPS_SYN_SENT:
12503 rack->r_state = TCPS_SYN_SENT;
rack->r_substate = rack_do_syn_sent;
break;
case TCPS_SYN_RECEIVED:
12507 rack->r_state = TCPS_SYN_RECEIVED;
rack->r_substate = rack_do_syn_recv;
break;
case TCPS_ESTABLISHED:
12511 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12512 rack->r_state = TCPS_ESTABLISHED;
rack->r_substate = rack_do_established;
break;
case TCPS_CLOSE_WAIT:
12516 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12517 rack->r_state = TCPS_CLOSE_WAIT;
rack->r_substate = rack_do_close_wait;
break;
case TCPS_FIN_WAIT_1:
12521 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12522 rack->r_state = TCPS_FIN_WAIT_1;
rack->r_substate = rack_do_fin_wait_1;
break;
case TCPS_CLOSING:
rack_set_pace_segments(tp, rack, __LINE__, NULL);
12527 rack->r_state = TCPS_CLOSING;
rack->r_substate = rack_do_closing;
break;
case TCPS_LAST_ACK:
12531 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12532 rack->r_state = TCPS_LAST_ACK;
rack->r_substate = rack_do_lastack;
break;
case TCPS_FIN_WAIT_2:
12536 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12537 rack->r_state = TCPS_FIN_WAIT_2;
rack->r_substate = rack_do_fin_wait_2;
break;
case TCPS_LISTEN:
case TCPS_CLOSED:
case TCPS_TIME_WAIT:
default:
break;
}
if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
}

static void
rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
{
/*
12555 * We received an ack, and then did not
12556 * call send or were bounced out due to the
12557 * hpts was running. Now a timer is up as well, is
 * it the right timer?
 */
struct rack_sendmap *rsm;
int tmr_up;

tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT))
return;
rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
12567 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) &&
12568 (tmr_up == PACE_TMR_RXT)) {
/* Should be an RXT */
return;
}
if (rsm == NULL) {
/* Nothing outstanding? */
if (tp->t_flags & TF_DELACK) {
12575 if (tmr_up == PACE_TMR_DELACK)
/* We are supposed to have delayed ack up and we do */
return;
} else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) {
/*
 * if we hit enobufs then we would expect the possibility
 * of nothing outstanding and the RXT up (and the hptsi timer).
 */
return;
} else if (((V_tcp_always_keepalive ||
12585 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
12586 (tp->t_state <= TCPS_CLOSING)) &&
12587 (tmr_up == PACE_TMR_KEEP) &&
12588 (tp->snd_max == tp->snd_una)) {
/* We should have keep alive up and we do */
return;
}
}
if (SEQ_GT(tp->snd_max, tp->snd_una) &&
12594 ((tmr_up == PACE_TMR_TLP) ||
12595 (tmr_up == PACE_TMR_RACK) ||
12596 (tmr_up == PACE_TMR_RXT))) {
12598 * Either a Rack, TLP or RXT is fine if we
 * have outstanding data.
 */
return;
} else if (tmr_up == PACE_TMR_DELACK) {
12604 * If the delayed ack was going to go off
12605 * before the rtx/tlp/rack timer were going to
12606 * expire, then that would be the timer in control.
 * Note we don't check the time here, trusting the
 * code is correct.
 */
return;
}
/*
 * Ok the timer originally started is not what we want now.
12614 * We will force the hpts to be stopped if any, and restart
 * with the slot set to what was in the saved slot.
 */
if (rack->rc_inp->inp_in_hpts) {
if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
uint32_t us_cts;

us_cts = tcp_get_usecs(NULL);
if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
}
rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
}
tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT);
}
rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
}

static void
rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts, uint32_t high_seq)
{
tp->snd_wnd = tiwin;
rack_validate_fo_sendwin_up(tp, rack);
tp->snd_wl1 = seq;
tp->snd_wl2 = ack;
if (tp->snd_wnd > tp->max_sndwnd)
12643 tp->max_sndwnd = tp->snd_wnd;
12644 if (tp->snd_wnd < (tp->snd_max - high_seq)) {
12645 /* The peer collapsed the window */
12646 rack_collapsed_window(rack);
12647 } else if (rack->rc_has_collapsed)
12648 rack_un_collapse_window(rack);
12649 /* Do we exit persists? */
12650 if ((rack->rc_in_persist != 0) &&
12651 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
12652 rack->r_ctl.rc_pace_min_segs))) {
rack_exit_persist(tp, rack, cts);
}
/* Do we enter persists? */
12656 if ((rack->rc_in_persist == 0) &&
12657 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
12658 TCPS_HAVEESTABLISHED(tp->t_state) &&
12659 (tp->snd_max == tp->snd_una) &&
12660 sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
(sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
/*
 * Here the rwnd is less than
12664 * the pacing size, we are established,
12665 * nothing is outstanding, and there is
 * data to send. Enter persists.
 */
rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
}
}
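/*
 * Example thresholds: with rc_high_rwnd = 64 kB and
 * rc_pace_min_segs of 1448 bytes, min(32768, 1448) = 1448, so
 * the connection enters persists only when the peer's window
 * drops below one pacing segment while data is still queued, and
 * exits persists once the window again reaches that size.
 */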

static void
rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq)
{
12676 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
12677 union tcp_log_stackspecific log;
12678 struct timeval ltv;
char tcp_hdr_buf[60];
struct tcphdr *th;
struct timespec ts;
12682 uint32_t orig_snd_una;
12685 #ifdef NETFLIX_HTTP_LOGGING
12686 struct http_sendfile_track *http_req;
12688 if (SEQ_GT(ae->ack, tp->snd_una)) {
12689 http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1));
http_req = tcp_http_find_req_for_seq(tp, ae->ack);
}
#endif
memset(&log.u_bbr, 0, sizeof(log.u_bbr));
12695 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
12696 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
12697 if (rack->rack_no_prr == 0)
12698 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
12700 log.u_bbr.flex1 = 0;
12701 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
12702 log.u_bbr.use_lt_bw <<= 1;
12703 log.u_bbr.use_lt_bw |= rack->r_might_revert;
12704 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
12705 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
12706 log.u_bbr.pkts_out = tp->t_maxseg;
12707 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
12708 log.u_bbr.flex7 = 1;
12709 log.u_bbr.lost = ae->flags;
12710 log.u_bbr.cwnd_gain = ackval;
12711 log.u_bbr.pacing_gain = 0x2;
12712 if (ae->flags & TSTMP_HDWR) {
12713 /* Record the hardware timestamp if present */
12714 log.u_bbr.flex3 = M_TSTMP;
12715 ts.tv_sec = ae->timestamp / 1000000000;
12716 ts.tv_nsec = ae->timestamp % 1000000000;
12717 ltv.tv_sec = ts.tv_sec;
12718 ltv.tv_usec = ts.tv_nsec / 1000;
log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
12720 } else if (ae->flags & TSTMP_LRO) {
12721 /* Record the LRO the arrival timestamp */
12722 log.u_bbr.flex3 = M_TSTMP_LRO;
12723 ts.tv_sec = ae->timestamp / 1000000000;
12724 ts.tv_nsec = ae->timestamp % 1000000000;
12725 ltv.tv_sec = ts.tv_sec;
12726 ltv.tv_usec = ts.tv_nsec / 1000;
log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
}
log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
12730 /* Log the rcv time */
12731 log.u_bbr.delRate = ae->timestamp;
12732 #ifdef NETFLIX_HTTP_LOGGING
12733 log.u_bbr.applimited = tp->t_http_closed;
12734 log.u_bbr.applimited <<= 8;
12735 log.u_bbr.applimited |= tp->t_http_open;
12736 log.u_bbr.applimited <<= 8;
12737 log.u_bbr.applimited |= tp->t_http_req;
/* Copy out any client req info */
if (http_req) {
log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
12743 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
12744 log.u_bbr.rttProp = http_req->timestamp;
12745 log.u_bbr.cur_del_rate = http_req->start;
12746 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
log.u_bbr.flex8 |= 1;
} else {
log.u_bbr.flex8 |= 2;
log.u_bbr.bw_inuse = http_req->end;
}
log.u_bbr.flex6 = http_req->start_seq;
12753 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
12754 log.u_bbr.flex8 |= 4;
log.u_bbr.epoch = http_req->end_seq;
}
}
#endif
memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf));
12760 th = (struct tcphdr *)tcp_hdr_buf;
12761 th->th_seq = ae->seq;
12762 th->th_ack = ae->ack;
12763 th->th_win = ae->win;
12764 /* Now fill in the ports */
12765 th->th_sport = tp->t_inpcb->inp_fport;
12766 th->th_dport = tp->t_inpcb->inp_lport;
12767 th->th_flags = ae->flags & 0xff;
12768 /* Now do we have a timestamp option? */
if (ae->flags & HAS_TSTMP) {
u_char *cp;
uint32_t val;

th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2);
cp = (u_char *)(th + 1);
*cp = TCPOPT_NOP;
cp++;
*cp = TCPOPT_NOP;
cp++;
*cp = TCPOPT_TIMESTAMP;
cp++;
*cp = TCPOLEN_TIMESTAMP;
cp++;
val = htonl(ae->ts_value);
bcopy((char *)&val,
(char *)cp, sizeof(uint32_t));
val = htonl(ae->ts_echo);
bcopy((char *)&val,
(char *)(cp + 4), sizeof(uint32_t));
} else
th->th_off = (sizeof(struct tcphdr) >> 2);
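/*
 * Resulting option layout (TCPOLEN_TSTAMP_APPA == 12 bytes, per
 * RFC 7323 Appendix A):
 *
 *	+-----+-----+--------+--------+---------+---------+
 *	| NOP | NOP | kind=8 | len=10 |  TSval  |  TSecr  |
 *	+-----+-----+--------+--------+---------+---------+
 *	   1     1      1        1      4 bytes   4 bytes
 *
 * The two NOPs pad the 10-byte timestamp option out to a 4-byte
 * boundary so th_off stays word-aligned.
 */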
12793 * For sane logging we need to play a little trick.
12794 * If the ack were fully processed we would have moved
12795 * snd_una to high_seq, but since compressed acks are
12796 * processed in two phases, at this point (logging) snd_una
12797 * won't be advanced. So we would see multiple acks showing
12798 * the advancement. We can prevent that by "pretending" that
12799 * snd_una was advanced and then un-advancing it so that the
12800 * logging code has the right value for tlb_snd_una.
12802 if (tp->snd_una != high_seq) {
12803 orig_snd_una = tp->snd_una;
tp->snd_una = high_seq;
xx = 1;
} else
xx = 0;
TCP_LOG_EVENTP(tp, th,
12809 &tp->t_inpcb->inp_socket->so_rcv,
12810 &tp->t_inpcb->inp_socket->so_snd, TCP_LOG_IN, 0,
0, &log, true, &ltv);
if (xx) {
tp->snd_una = orig_snd_una;
}
}
}

static int
rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv)
{
/*
12823 * Handle a "special" compressed ack mbuf. Each incoming
12824 * ack has only four possible dispositions:
12826 * A) It moves the cum-ack forward
12827 * B) It is behind the cum-ack.
12828 * C) It is a window-update ack.
12829 * D) It is a dup-ack.
12831 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES
12832 * in the incoming mbuf. We also need to still pay attention
 * to nxt_pkt since there may be another packet after this
 * one.
 */
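/*
 * In the classification loop below these four dispositions map
 * onto ae->ack_val_set: (A) ACK_CUMACK when the ack advances past
 * high_seq, (B) ACK_BEHIND when it is older, (C) ACK_RWND when
 * the ack matches but the window changed, and (D) ACK_DUPACK when
 * both the ack and the window are unchanged.
 */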
#ifdef TCP_ACCOUNTING
uint64_t ts_val;
uint64_t rdstc;
#endif
int segsiz;
struct timespec ts;
12842 struct tcp_rack *rack;
12843 struct tcp_ackent *ae;
12844 uint32_t tiwin, us_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack;
12845 int cnt, i, did_out, ourfinisacked = 0;
12846 int win_up_req = 0;
12847 struct tcpopt to_holder, *to = NULL;
12849 int under_pacing = 1;
#ifdef TCP_ACCOUNTING
sched_pin();
#endif
rack = (struct tcp_rack *)tp->t_fb_ptr;
12856 if (rack->gp_ready &&
12857 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT))
12862 if (rack->r_state != tp->t_state)
12863 rack_set_state(tp, rack);
12864 to = &to_holder;
12865 to->to_flags = 0;
12866 KASSERT((m->m_len >= sizeof(struct tcp_ackent)),
12867 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len));
12868 cnt = m->m_len / sizeof(struct tcp_ackent);
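/*
 * Illustrative arithmetic (annotation, not original source): the
 * compressed-ack mbuf is simply an array of struct tcp_ackent, so an
 * m_len equal to five times sizeof(struct tcp_ackent) yields cnt = 5
 * and the loop below visits the five acks in arrival order.
 */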
12870 if (idx >= MAX_NUM_OF_CNTS)
12871 idx = MAX_NUM_OF_CNTS - 1;
12872 counter_u64_add(rack_proc_comp_ack[idx], 1);
12873 counter_u64_add(rack_multi_single_eq, cnt);
12874 high_seq = tp->snd_una;
12875 the_win = tp->snd_wnd;
12876 win_seq = tp->snd_wl1;
12877 win_upd_ack = tp->snd_wl2;
12878 cts = us_cts = tcp_tv_to_usectick(tv);
12879 segsiz = ctf_fixed_maxseg(tp);
12880 if ((rack->rc_gp_dyn_mul) &&
12881 (rack->use_fixed_rate == 0) &&
12882 (rack->rc_always_pace)) {
12883 /* Check in on probertt */
12884 rack_check_probe_rtt(rack, us_cts);
12886 for (i = 0; i < cnt; i++) {
12887 #ifdef TCP_ACCOUNTING
12888 ts_val = get_cyclecount();
12889 #endif
12890 rack_clear_rate_sample(rack);
12891 ae = ((mtod(m, struct tcp_ackent *)) + i);
12892 /* Setup the window */
12893 tiwin = ae->win << tp->snd_scale;
12894 /* figure out the type of ack */
12895 if (SEQ_LT(ae->ack, high_seq)) {
12897 ae->ack_val_set = ACK_BEHIND;
12898 } else if (SEQ_GT(ae->ack, high_seq)) {
12900 ae->ack_val_set = ACK_CUMACK;
12901 } else if (tiwin == the_win) {
12903 ae->ack_val_set = ACK_DUPACK;
12906 ae->ack_val_set = ACK_RWND;
12908 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq);
12909 /* Validate timestamp */
12910 if (ae->flags & HAS_TSTMP) {
12911 /* Setup for a timestamp */
12912 to->to_flags = TOF_TS;
12913 ae->ts_echo -= tp->ts_offset;
12914 to->to_tsecr = ae->ts_echo;
12915 to->to_tsval = ae->ts_value;
12916 /*
12917 * If echoed timestamp is later than the current time, fall back to
12918 * non RFC1323 RTT calculation. Normalize timestamp if syncookies
12919 * were used when this connection was established.
12920 */
12921 if (TSTMP_GT(ae->ts_echo, cts))
12922 to->to_tsecr = 0;
12923 if (tp->ts_recent &&
12924 TSTMP_LT(ae->ts_value, tp->ts_recent)) {
12925 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) {
12926 #ifdef TCP_ACCOUNTING
12927 rdstc = get_cyclecount();
12928 if (rdstc > ts_val) {
12929 counter_u64_add(tcp_proc_time[ae->ack_val_set] ,
12930 (rdstc - ts_val));
12931 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
12932 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
12939 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) &&
12940 SEQ_LEQ(tp->last_ack_sent, ae->seq)) {
12941 tp->ts_recent_age = tcp_ts_getticks();
12942 tp->ts_recent = ae->ts_value;
12945 /* Setup for no options */
12946 to->to_flags = 0;
12947 }
12948 /* Update the rcv time and perform idle reduction possibly */
12949 if (tp->t_idle_reduce &&
12950 (tp->snd_max == tp->snd_una) &&
12951 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
12952 counter_u64_add(rack_input_idle_reduces, 1);
12953 rack_cc_after_idle(rack, tp);
12955 tp->t_rcvtime = ticks;
12956 /* Now what about ECN? */
12957 if (tp->t_flags2 & TF2_ECN_PERMIT) {
12958 if (ae->flags & TH_CWR) {
12959 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
12960 tp->t_flags |= TF_ACKNOW;
12962 switch (ae->codepoint & IPTOS_ECN_MASK) {
12964 tp->t_flags2 |= TF2_ECN_SND_ECE;
12965 KMOD_TCPSTAT_INC(tcps_ecn_ce);
12967 case IPTOS_ECN_ECT0:
12968 KMOD_TCPSTAT_INC(tcps_ecn_ect0);
12970 case IPTOS_ECN_ECT1:
12971 KMOD_TCPSTAT_INC(tcps_ecn_ect1);
12975 /* Process a packet differently from RFC3168. */
12976 cc_ecnpkt_handler_flags(tp, ae->flags, ae->codepoint);
12977 /* Congestion experienced. */
12978 if (ae->flags & TH_ECE) {
12979 rack_cong_signal(tp, CC_ECN, ae->ack);
12982 #ifdef TCP_ACCOUNTING
12983 /* Count for the specific type of ack in */
12984 counter_u64_add(tcp_cnt_counters[ae->ack_val_set], 1);
12985 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
12986 tp->tcp_cnt_counters[ae->ack_val_set]++;
12990 * Note how we could move these up in the determination
12991 * above, but we don't so that way the timestamp checks (and ECN)
12992 * are done first before we do any processing on the ACK.
12993 * The non-compressed path through the code has this
12994 * weakness (noted by @jtl) that it actually does some
12995 * processing before verifying the timestamp information.
12996 * We don't take that path here which is why we set
12997 * the ack_val_set first, do the timestamp and ecn
12998 * processing, and then look at what we have setup.
13000 if (ae->ack_val_set == ACK_BEHIND) {
13001 /*
13002 * Case B: the ack is behind the cum-ack. Flag possible reordering
13003 * if the window is not closed, or it could be a keep-alive or persist.
13004 */
13005 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) {
13006 counter_u64_add(rack_reorder_seen, 1);
13007 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
13009 } else if (ae->ack_val_set == ACK_DUPACK) {
13012 rack_strike_dupack(rack);
13013 } else if (ae->ack_val_set == ACK_RWND) {
13017 win_upd_ack = ae->ack;
13023 if (SEQ_GT(ae->ack, tp->snd_max)) {
13025 * We just send an ack since the incoming
13026 * ack is beyond the largest seq we sent.
13028 if ((tp->t_flags & TF_ACKNOW) == 0) {
13029 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt);
13030 if (tp->t_flags & TF_ACKNOW)
13031 rack->r_wanted_output = 1;
13035 /* If the window changed setup to update */
13036 if (tiwin != tp->snd_wnd) {
13038 win_upd_ack = ae->ack;
13042 #ifdef TCP_ACCOUNTING
13043 /* Account for the acks */
13044 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13045 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz);
13047 counter_u64_add(tcp_cnt_counters[CNT_OF_ACKS_IN],
13048 (((ae->ack - high_seq) + segsiz - 1) / segsiz));
13049 #endif
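/*
 * Illustrative arithmetic (annotation, not original source): with
 * segsiz = 1448 and an ack advancing high_seq by 4000 bytes, the
 * rounded-up credit is (4000 + 1448 - 1) / 1448 = 3 acks-in.
 */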
13050 high_seq = ae->ack;
13051 /* Setup our act_rcv_time */
13052 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
13053 ts.tv_sec = ae->timestamp / 1000000000;
13054 ts.tv_nsec = ae->timestamp % 1000000000;
13055 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
13056 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
13057 } else {
13058 rack->r_ctl.act_rcv_time = *tv;
13059 }
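/*
 * Illustrative arithmetic (annotation, not original source): an LRO or
 * hardware timestamp of 1700000123456789012 nanoseconds splits into
 * tv_sec = 1700000123 and tv_nsec = 456789012, and the divide by 1000
 * gives tv_usec = 456789.
 */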
13060 rack_process_to_cumack(tp, rack, ae->ack, cts, to);
13063 /* And lets be sure to commit the rtt measurements for this ack */
13064 tcp_rack_xmit_timer_commit(rack, tp);
13065 #ifdef TCP_ACCOUNTING
13066 rdstc = get_cyclecount();
13067 if (rdstc > ts_val) {
13068 counter_u64_add(tcp_proc_time[ae->ack_val_set] , (rdstc - ts_val));
13069 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13070 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
13071 if (ae->ack_val_set == ACK_CUMACK)
13072 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val);
13077 #ifdef TCP_ACCOUNTING
13078 ts_val = get_cyclecount();
13079 #endif
13080 acked_amount = acked = (high_seq - tp->snd_una);
13082 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq);
13085 if (rack->sack_attack_disable == 0)
13086 rack_do_decay(rack);
13087 if (acked >= segsiz) {
13088 /*
13089 * You only get credit for
13090 * one MSS and greater (and you get extra
13091 * credit for larger cum-ack moves).
13092 */
13095 ac = acked / segsiz;
13096 rack->r_ctl.ack_count += ac;
13097 counter_u64_add(rack_ack_total, ac);
13099 if (rack->r_ctl.ack_count > 0xfff00000) {
13101 * reduce the number to keep us under
13102 * a uint32_t.
13103 */
13104 rack->r_ctl.ack_count /= 2;
13105 rack->r_ctl.sack_count /= 2;
13107 if (tp->t_flags & TF_NEEDSYN) {
13109 * T/TCP: Connection was half-synchronized, and our SYN has
13110 * been ACK'd (so connection is now fully synchronized). Go
13111 * to non-starred state, increment snd_una for ACK of SYN,
13112 * and check if we can do window scaling.
13114 tp->t_flags &= ~TF_NEEDSYN;
13116 acked_amount = acked = (high_seq - tp->snd_una);
13118 if (acked > sbavail(&so->so_snd))
13119 acked_amount = sbavail(&so->so_snd);
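/*
 * Illustrative arithmetic (annotation, not original source): a FIN
 * occupies a sequence number but no socket-buffer byte, so if
 * high_seq - snd_una = 3001 while sbavail() is 3000, acked stays 3001
 * and acked_amount is clamped to the 3000 bytes actually in the sb.
 */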
13120 #ifdef NETFLIX_EXP_DETECTION
13122 * We only care on a cum-ack move if we are in a sack-disabled
13123 * state. We have already added in to the ack_count, and we never
13124 * would disable on a cum-ack move, so we only care to do the
13125 * detection if it may "undo" it, i.e. we were in disabled already.
13127 if (rack->sack_attack_disable)
13128 rack_do_detection(tp, rack, acked_amount, segsiz);
13130 if (IN_FASTRECOVERY(tp->t_flags) &&
13131 (rack->rack_no_prr == 0))
13132 rack_update_prr(tp, rack, acked_amount, high_seq);
13133 if (IN_RECOVERY(tp->t_flags)) {
13134 if (SEQ_LT(high_seq, tp->snd_recover) &&
13135 (SEQ_LT(high_seq, tp->snd_max))) {
13136 tcp_rack_partialack(tp);
13138 rack_post_recovery(tp, high_seq);
13142 /* Handle the rack-log-ack part (sendmap) */
13143 if ((sbused(&so->so_snd) == 0) &&
13144 (acked > acked_amount) &&
13145 (tp->t_state >= TCPS_FIN_WAIT_1) &&
13146 (tp->t_flags & TF_SENTFIN)) {
13147 /*
13148 * We must be sure our fin
13149 * was sent and acked (we can be
13150 * in FIN_WAIT_1 without having
13151 * sent the fin).
13152 */
13153 ourfinisacked = 1;
13154 /*
13155 * Lets make sure snd_una is updated
13156 * since most likely acked_amount = 0 (it
13157 * should be).
13158 */
13159 tp->snd_una = high_seq;
13161 /* Did we make a RTO error? */
13162 if ((tp->t_flags & TF_PREVVALID) &&
13163 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
13164 tp->t_flags &= ~TF_PREVVALID;
13165 if (tp->t_rxtshift == 1 &&
13166 (int)(ticks - tp->t_badrxtwin) < 0)
13167 rack_cong_signal(tp, CC_RTO_ERR, high_seq);
13169 /* Handle the data in the socket buffer */
13170 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1);
13171 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
13172 if (acked_amount > 0) {
13173 struct mbuf *mfree;
13175 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery);
13176 SOCKBUF_LOCK(&so->so_snd);
13177 mfree = sbcut_locked(&so->so_snd, acked);
13178 tp->snd_una = high_seq;
13179 /* Note we want to hold the sb lock through the sendmap adjust */
13180 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una);
13181 /* Wake up the socket if we have room to write more */
13182 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
13183 sowwakeup_locked(so);
13186 /* update progress */
13187 tp->t_acktime = ticks;
13188 rack_log_progress_event(rack, tp, tp->t_acktime,
13189 PROGRESS_UPDATE, __LINE__);
13190 /* Clear out shifts and such */
13191 tp->t_rxtshift = 0;
13192 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
13193 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
13194 rack->rc_tlp_in_progress = 0;
13195 rack->r_ctl.rc_tlp_cnt_out = 0;
13196 /* snd_recover and snd_nxt must be dragged along */
13197 if (SEQ_GT(tp->snd_una, tp->snd_recover))
13198 tp->snd_recover = tp->snd_una;
13199 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
13200 tp->snd_nxt = tp->snd_una;
13202 * If the RXT timer is running we want to
13203 * stop it, so we can restart a TLP (or new RXT).
13205 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
13206 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
13207 #ifdef NETFLIX_HTTP_LOGGING
13208 tcp_http_check_for_comp(rack->rc_tp, high_seq);
13210 tp->snd_wl2 = high_seq;
13212 if (under_pacing &&
13213 (rack->use_fixed_rate == 0) &&
13214 (rack->in_probe_rtt == 0) &&
13215 rack->rc_gp_dyn_mul &&
13216 rack->rc_always_pace) {
13217 /* Check if we are dragging bottom */
13218 rack_check_bottom_drag(tp, rack, so, acked);
13220 if (tp->snd_una == tp->snd_max) {
13221 tp->t_flags &= ~TF_PREVVALID;
13222 rack->r_ctl.retran_during_recovery = 0;
13223 rack->r_ctl.dsack_byte_cnt = 0;
13224 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
13225 if (rack->r_ctl.rc_went_idle_time == 0)
13226 rack->r_ctl.rc_went_idle_time = 1;
13227 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
13228 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
13230 /* Set so we might enter persists... */
13231 rack->r_wanted_output = 1;
13232 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
13233 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
13234 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
13235 (sbavail(&so->so_snd) == 0) &&
13236 (tp->t_flags2 & TF2_DROP_AF_DATA)) {
13238 * The socket was gone and the
13239 * peer sent data (not now in the past), time to
13240 * reset the connection.
13241 */
13242 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
13243 /* tcp_close will kill the inp pre-log the Reset */
13244 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
13245 #ifdef TCP_ACCOUNTING
13246 rdstc = get_cyclecount();
13247 if (rdstc > ts_val) {
13248 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val));
13249 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13250 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13251 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13256 tp = tcp_close(tp);
13258 #ifdef TCP_ACCOUNTING
13264 * We would normally do drop-with-reset which would
13265 * send back a reset. We can't since we don't have
13266 * all the needed bits. Instead lets arrange for
13267 * a call to tcp_output(). That way since we
13268 * are in the closed state we will generate a reset.
13270 * Note if tcp_accounting is on we don't unpin since
13271 * we do that after the goto label.
13273 goto send_out_a_rst;
13275 if ((sbused(&so->so_snd) == 0) &&
13276 (tp->t_state >= TCPS_FIN_WAIT_1) &&
13277 (tp->t_flags & TF_SENTFIN)) {
13279 * If we can't receive any more data, then closing user can
13280 * proceed. Starting the timer is contrary to the
13281 * specification, but if we don't get a FIN we'll hang
13282 * around forever.
13283 */
13285 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
13286 soisdisconnected(so);
13287 tcp_timer_activate(tp, TT_2MSL,
13288 (tcp_fast_finwait2_recycle ?
13289 tcp_finwait2_timeout :
13290 TP_MAXIDLE(tp)));
13291 }
13292 if (ourfinisacked == 0) {
13294 * We don't change to fin-wait-2 if we have our fin acked
13295 * which means we are probably in TCPS_CLOSING.
13297 tcp_state_change(tp, TCPS_FIN_WAIT_2);
13301 /* Wake up the socket if we have room to write more */
13302 if (sbavail(&so->so_snd)) {
13303 rack->r_wanted_output = 1;
13304 if (ctf_progress_timeout_check(tp, true)) {
13305 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
13306 tp, tick, PROGRESS_DROP, __LINE__);
13307 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
13309 * We cheat here and don't send a RST, we should send one
13310 * when the pacer drops the connection.
13312 #ifdef TCP_ACCOUNTING
13313 rdstc = get_cyclecount();
13314 if (rdstc > ts_val) {
13315 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val));
13316 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13317 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13318 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13323 INP_WUNLOCK(rack->rc_inp);
13328 if (ourfinisacked) {
13329 switch(tp->t_state) {
13331 #ifdef TCP_ACCOUNTING
13332 rdstc = get_cyclecount();
13333 if (rdstc > ts_val) {
13334 counter_u64_add(tcp_proc_time[ACK_CUMACK] ,
13335 (rdstc - ts_val));
13336 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13337 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13338 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13347 case TCPS_LAST_ACK:
13348 #ifdef TCP_ACCOUNTING
13349 rdstc = get_cyclecount();
13350 if (rdstc > ts_val) {
13351 counter_u64_add(tcp_proc_time[ACK_CUMACK] ,
13352 (rdstc - ts_val));
13353 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13354 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13355 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13360 tp = tcp_close(tp);
13361 ctf_do_drop(m, tp);
13364 case TCPS_FIN_WAIT_1:
13365 #ifdef TCP_ACCOUNTING
13366 rdstc = get_cyclecount();
13367 if (rdstc > ts_val) {
13368 counter_u64_add(tcp_proc_time[ACK_CUMACK] ,
13369 (rdstc - ts_val));
13370 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13371 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13372 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13376 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
13377 soisdisconnected(so);
13378 tcp_timer_activate(tp, TT_2MSL,
13379 (tcp_fast_finwait2_recycle ?
13380 tcp_finwait2_timeout :
13381 TP_MAXIDLE(tp)));
13382 }
13383 tcp_state_change(tp, TCPS_FIN_WAIT_2);
13389 if (rack->r_fast_output) {
13391 * We are doing fast output... can we expand that?
13392 */
13393 rack_gain_for_fastoutput(rack, tp, so, acked_amount);
13395 #ifdef TCP_ACCOUNTING
13396 rdstc = get_cyclecount();
13397 if (rdstc > ts_val) {
13398 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val));
13399 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13400 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13401 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13405 } else if (win_up_req) {
13406 rdstc = get_cyclecount();
13407 if (rdstc > ts_val) {
13408 counter_u64_add(tcp_proc_time[ACK_RWND] , (rdstc - ts_val));
13409 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13410 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val);
13415 /* Now is there a next packet, if so we are done */
13419 #ifdef TCP_ACCOUNTING
13420 sched_unpin();
13421 #endif
13422 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs);
13425 rack_handle_might_revert(tp, rack);
13426 ctf_calc_rwin(so, tp);
13427 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) {
13429 (void)tp->t_fb->tfb_tcp_output(tp);
13430 did_out = 1;
13431 }
13432 rack_free_trim(rack);
13433 #ifdef TCP_ACCOUNTING
13434 sched_unpin();
13435 #endif
13436 rack_timer_audit(tp, rack, &so->so_snd);
13437 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs);
13443 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
13444 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos,
13445 int32_t nxt_pkt, struct timeval *tv)
13447 #ifdef TCP_ACCOUNTING
13450 int32_t thflags, retval, did_out = 0;
13451 int32_t way_out = 0;
13454 struct timespec ts;
13456 struct tcp_rack *rack;
13457 struct rack_sendmap *rsm;
13458 int32_t prev_state = 0;
13459 #ifdef TCP_ACCOUNTING
13460 int ack_val_set = 0xf;
13465 * tv passed from common code is from either M_TSTMP_LRO or
13466 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present.
13468 if (m->m_flags & M_ACKCMP) {
13469 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv));
13471 if (m->m_flags & M_ACKCMP) {
13472 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp);
13474 nsegs = m->m_pkthdr.lro_nsegs;
13475 counter_u64_add(rack_proc_non_comp_ack, 1);
13476 thflags = th->th_flags;
13477 #ifdef TCP_ACCOUNTING
13478 sched_pin();
13479 if (thflags & TH_ACK)
13480 ts_val = get_cyclecount();
13481 #endif
13482 cts = tcp_tv_to_usectick(tv);
13483 rack = (struct tcp_rack *)tp->t_fb_ptr;
13485 if ((m->m_flags & M_TSTMP) ||
13486 (m->m_flags & M_TSTMP_LRO)) {
13487 mbuf_tstmp2timespec(m, &ts);
13488 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
13489 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
13490 } else
13491 rack->r_ctl.act_rcv_time = *tv;
13492 kern_prefetch(rack, &prev_state);
13495 * Unscale the window into a 32-bit value. For the SYN_SENT state
13496 * the scale is zero.
13498 tiwin = th->th_win << tp->snd_scale;
13500 * Parse options on any incoming segment.
13502 memset(&to, 0, sizeof(to));
13503 tcp_dooptions(&to, (u_char *)(th + 1),
13504 (th->th_off << 2) - sizeof(struct tcphdr),
13505 (thflags & TH_SYN) ? TO_SYN : 0);
13506 #ifdef TCP_ACCOUNTING
13507 if (thflags & TH_ACK) {
13509 * We have a tradeoff here. We can either do what we are
13510 * doing i.e. pinning to this CPU and then doing the accounting
13511 * <or> we could do a critical enter, setup the rdtsc and cpu
13512 * as in below, and then validate we are on the same CPU on
13513 * exit. I have chosen to not do the critical enter since
13514 * that often will gain you a context switch, and instead lock
13515 * us (line above this if) to the same CPU with sched_pin(). This
13516 * means we may be context switched out for a higher priority
13517 * interrupt but we won't be moved to another CPU.
13518 *
13519 * If this occurs (which it won't very often since we most likely
13520 * are running this code in interrupt context and only a higher
13521 * priority will bump us ... clock?) we will falsely add in
13522 * to the time the interrupt processing time plus the ack processing
13523 * time. This is ok since it's a rare event.
13525 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin,
13526 ctf_fixed_maxseg(tp));
13529 NET_EPOCH_ASSERT();
13530 INP_WLOCK_ASSERT(tp->t_inpcb);
13531 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
13533 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
13535 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
13536 union tcp_log_stackspecific log;
13537 struct timeval ltv;
13538 #ifdef NETFLIX_HTTP_LOGGING
13539 struct http_sendfile_track *http_req;
13541 if (SEQ_GT(th->th_ack, tp->snd_una)) {
13542 http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1));
13544 http_req = tcp_http_find_req_for_seq(tp, th->th_ack);
13547 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
13548 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
13549 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
13550 if (rack->rack_no_prr == 0)
13551 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
13553 log.u_bbr.flex1 = 0;
13554 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
13555 log.u_bbr.use_lt_bw <<= 1;
13556 log.u_bbr.use_lt_bw |= rack->r_might_revert;
13557 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
13558 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
13559 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
13560 log.u_bbr.flex3 = m->m_flags;
13561 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
13562 log.u_bbr.lost = thflags;
13563 log.u_bbr.pacing_gain = 0x1;
13564 #ifdef TCP_ACCOUNTING
13565 log.u_bbr.cwnd_gain = ack_val_set;
13567 log.u_bbr.flex7 = 2;
13568 if (m->m_flags & M_TSTMP) {
13569 /* Record the hardware timestamp if present */
13570 mbuf_tstmp2timespec(m, &ts);
13571 ltv.tv_sec = ts.tv_sec;
13572 ltv.tv_usec = ts.tv_nsec / 1000;
13573 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
13574 } else if (m->m_flags & M_TSTMP_LRO) {
13575 /* Record the LRO arrival timestamp */
13576 mbuf_tstmp2timespec(m, &ts);
13577 ltv.tv_sec = ts.tv_sec;
13578 ltv.tv_usec = ts.tv_nsec / 1000;
13579 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
13581 log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
13582 /* Log the rcv time */
13583 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
13584 #ifdef NETFLIX_HTTP_LOGGING
13585 log.u_bbr.applimited = tp->t_http_closed;
13586 log.u_bbr.applimited <<= 8;
13587 log.u_bbr.applimited |= tp->t_http_open;
13588 log.u_bbr.applimited <<= 8;
13589 log.u_bbr.applimited |= tp->t_http_req;
13591 /* Copy out any client req info */
13593 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
13595 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
13596 log.u_bbr.rttProp = http_req->timestamp;
13597 log.u_bbr.cur_del_rate = http_req->start;
13598 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
13599 log.u_bbr.flex8 |= 1;
13600 } else {
13601 log.u_bbr.flex8 |= 2;
13602 log.u_bbr.bw_inuse = http_req->end;
13604 log.u_bbr.flex6 = http_req->start_seq;
13605 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
13606 log.u_bbr.flex8 |= 4;
13607 log.u_bbr.epoch = http_req->end_seq;
13611 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
13612 tlen, &log, true, &ltv);
13614 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
13618 goto done_with_input;
13621 * If a segment with the ACK-bit set arrives in the SYN-SENT state
13622 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
13624 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
13625 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
13626 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
13627 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13628 #ifdef TCP_ACCOUNTING
13635 * Parse options on any incoming segment.
13637 tcp_dooptions(&to, (u_char *)(th + 1),
13638 (th->th_off << 2) - sizeof(struct tcphdr),
13639 (thflags & TH_SYN) ? TO_SYN : 0);
13642 * If timestamps were negotiated during SYN/ACK and a
13643 * segment without a timestamp is received, silently drop
13644 * the segment, unless it is a RST segment or missing timestamps are
13645 * tolerated.
13646 * See section 3.2 of RFC 7323.
13647 */
13648 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) &&
13649 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) {
13653 goto done_with_input;
13657 * Segment received on connection. Reset idle time and keep-alive
13658 * timer. XXX: This should be done after segment validation to
13659 * ignore broken/spoofed segs.
13661 if (tp->t_idle_reduce &&
13662 (tp->snd_max == tp->snd_una) &&
13663 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
13664 counter_u64_add(rack_input_idle_reduces, 1);
13665 rack_cc_after_idle(rack, tp);
13667 tp->t_rcvtime = ticks;
13669 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
13671 if (tiwin > rack->r_ctl.rc_high_rwnd)
13672 rack->r_ctl.rc_high_rwnd = tiwin;
13674 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move
13675 * this to occur after we've validated the segment.
13677 if (tp->t_flags2 & TF2_ECN_PERMIT) {
13678 if (thflags & TH_CWR) {
13679 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
13680 tp->t_flags |= TF_ACKNOW;
13682 switch (iptos & IPTOS_ECN_MASK) {
13684 tp->t_flags2 |= TF2_ECN_SND_ECE;
13685 KMOD_TCPSTAT_INC(tcps_ecn_ce);
13687 case IPTOS_ECN_ECT0:
13688 KMOD_TCPSTAT_INC(tcps_ecn_ect0);
13690 case IPTOS_ECN_ECT1:
13691 KMOD_TCPSTAT_INC(tcps_ecn_ect1);
13695 /* Process a packet differently from RFC3168. */
13696 cc_ecnpkt_handler(tp, th, iptos);
13698 /* Congestion experienced. */
13699 if (thflags & TH_ECE) {
13700 rack_cong_signal(tp, CC_ECN, th->th_ack);
13705 * If echoed timestamp is later than the current time, fall back to
13706 * non RFC1323 RTT calculation. Normalize timestamp if syncookies
13707 * were used when this connection was established.
13709 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
13710 to.to_tsecr -= tp->ts_offset;
13711 if (TSTMP_GT(to.to_tsecr, cts))
13712 to.to_tsecr = 0;
13713 }
13715 /*
13716 * If it's the first time in we need to take care of options and
13717 * verify we can do SACK for rack!
13718 */
13719 if (rack->r_state == 0) {
13720 /* Should be init'd by rack_init() */
13721 KASSERT(rack->rc_inp != NULL,
13722 ("%s: rack->rc_inp unexpectedly NULL", __func__));
13723 if (rack->rc_inp == NULL) {
13724 rack->rc_inp = tp->t_inpcb;
13728 * Process options only when we get SYN/ACK back. The SYN
13729 * case for incoming connections is handled in tcp_syncache.
13730 * According to RFC1323 the window field in a SYN (i.e., a
13731 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX
13732 * this is traditional behavior, may need to be cleaned up.
13734 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
13735 /* Handle parallel SYN for ECN */
13736 if (!(thflags & TH_ACK) &&
13737 ((thflags & (TH_CWR | TH_ECE)) == (TH_CWR | TH_ECE)) &&
13738 ((V_tcp_do_ecn == 1) || (V_tcp_do_ecn == 2))) {
13739 tp->t_flags2 |= TF2_ECN_PERMIT;
13740 tp->t_flags2 |= TF2_ECN_SND_ECE;
13741 TCPSTAT_INC(tcps_ecn_shs);
13743 if ((to.to_flags & TOF_SCALE) &&
13744 (tp->t_flags & TF_REQ_SCALE)) {
13745 tp->t_flags |= TF_RCVD_SCALE;
13746 tp->snd_scale = to.to_wscale;
13748 tp->t_flags &= ~TF_REQ_SCALE;
13750 * Initial send window. It will be updated with the
13751 * next incoming segment to the scaled value.
13753 tp->snd_wnd = th->th_win;
13754 rack_validate_fo_sendwin_up(tp, rack);
13755 if ((to.to_flags & TOF_TS) &&
13756 (tp->t_flags & TF_REQ_TSTMP)) {
13757 tp->t_flags |= TF_RCVD_TSTMP;
13758 tp->ts_recent = to.to_tsval;
13759 tp->ts_recent_age = cts;
13761 tp->t_flags &= ~TF_REQ_TSTMP;
13762 if (to.to_flags & TOF_MSS) {
13763 tcp_mss(tp, to.to_mss);
13765 if ((tp->t_flags & TF_SACK_PERMIT) &&
13766 (to.to_flags & TOF_SACKPERM) == 0)
13767 tp->t_flags &= ~TF_SACK_PERMIT;
13768 if (IS_FASTOPEN(tp->t_flags)) {
13769 if (to.to_flags & TOF_FASTOPEN) {
13772 if (to.to_flags & TOF_MSS)
13775 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
13779 tcp_fastopen_update_cache(tp, mss,
13780 to.to_tfo_len, to.to_tfo_cookie);
13782 tcp_fastopen_disable_path(tp);
13786 * At this point we are at the initial call. Here we decide
13787 * if we are doing RACK or not. We do this by seeing if
13788 * TF_SACK_PERMIT is set and the sack-not-required is clear.
13789 * The code now does do dup-ack counting so if you don't
13790 * switch back you won't get rack & TLP, but you will still
13791 * get this stack.
13792 */
13794 if ((rack_sack_not_required == 0) &&
13795 ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
13796 tcp_switch_back_to_default(tp);
13797 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen,
13799 #ifdef TCP_ACCOUNTING
13804 tcp_set_hpts(tp->t_inpcb);
13805 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack);
13807 if (thflags & TH_FIN)
13808 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN);
13809 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
13810 if ((rack->rc_gp_dyn_mul) &&
13811 (rack->use_fixed_rate == 0) &&
13812 (rack->rc_always_pace)) {
13813 /* Check in on probertt */
13814 rack_check_probe_rtt(rack, us_cts);
13816 if (rack->forced_ack) {
13820 * A persist or keep-alive was forced out, update our
13821 * min rtt time. Note we do not worry about lost
13822 * retransmissions since keep-alives and persists
13823 * are usually sent with long gaps in between (though
13824 * if we were really paranoid or worried we could
13825 * at least use timestamps, if available, to validate).
13826 */
13827 rack->forced_ack = 0;
13828 us_rtt = us_cts - rack->r_ctl.forced_ack_ts;
13831 rack_log_rtt_upd(tp, rack, us_rtt, 0, NULL, 3);
13832 rack_apply_updated_usrtt(rack, us_rtt, us_cts);
13835 * This is the one exception case where we set the rack state
13836 * always. All other times (timers etc) we must have a rack-state
13837 * set (so we assure we have done the checks above for SACK).
13839 rack->r_ctl.rc_rcvtime = cts;
13840 if (rack->r_state != tp->t_state)
13841 rack_set_state(tp, rack);
13842 if (SEQ_GT(th->th_ack, tp->snd_una) &&
13843 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL)
13844 kern_prefetch(rsm, &prev_state);
13845 prev_state = rack->r_state;
13846 rack_clear_rate_sample(rack);
13847 retval = (*rack->r_substate) (m, th, so,
13848 tp, &to, drop_hdrlen,
13849 tlen, tiwin, thflags, nxt_pkt, iptos);
13851 if ((retval == 0) &&
13852 (tp->t_inpcb == NULL)) {
13853 panic("retval:%d tp:%p t_inpcb:NULL state:%d",
13854 retval, tp, prev_state);
13859 * If retval is 1 the tcb is unlocked and most likely the tp
13860 * is gone.
13861 */
13862 INP_WLOCK_ASSERT(tp->t_inpcb);
13863 if ((rack->rc_gp_dyn_mul) &&
13864 (rack->rc_always_pace) &&
13865 (rack->use_fixed_rate == 0) &&
13866 rack->in_probe_rtt &&
13867 (rack->r_ctl.rc_time_probertt_starts == 0)) {
13869 * If we are going for target, lets recheck before
13870 * we output.
13871 */
13872 rack_check_probe_rtt(rack, us_cts);
13874 if (rack->set_pacing_done_a_iw == 0) {
13875 /* How much has been acked? */
13876 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) {
13877 /* We have enough to set in the pacing segment size */
13878 rack->set_pacing_done_a_iw = 1;
13879 rack_set_pace_segments(tp, rack, __LINE__, NULL);
13882 tcp_rack_xmit_timer_commit(rack, tp);
13883 #ifdef TCP_ACCOUNTING
13885 * If we set ack_val_set to the kind of ack processing we are doing
13886 * we also want to track how many cycles we burned. Note
13887 * the bits after tcp_output we let be "free". This is because
13888 * we are also tracking the tcp_output times as well. Note the
13889 * use of 0xf here since we only have 11 counters (0 - 0xa) and
13890 * 0xf cannot be returned and is what we initialize it to, to
13891 * indicate we are not doing the tabulations.
13892 */
13893 if (ack_val_set != 0xf) {
13896 crtsc = get_cyclecount();
13897 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val));
13898 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13899 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val);
13903 if (nxt_pkt == 0) {
13904 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) {
13905 do_output_now:
13906 did_out = 1;
13907 (void)tp->t_fb->tfb_tcp_output(tp);
13909 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
13910 rack_free_trim(rack);
13912 if ((nxt_pkt == 0) &&
13913 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
13914 (SEQ_GT(tp->snd_max, tp->snd_una) ||
13915 (tp->t_flags & TF_DELACK) ||
13916 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
13917 (tp->t_state <= TCPS_CLOSING)))) {
13918 /* We could not send (probably in the hpts but stopped the timer earlier)? */
13919 if ((tp->snd_max == tp->snd_una) &&
13920 ((tp->t_flags & TF_DELACK) == 0) &&
13921 (rack->rc_inp->inp_in_hpts) &&
13922 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
13923 /* keep-alive not needed if we are in hpts awaiting output */
13927 if (rack->rc_inp->inp_in_hpts) {
13928 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
13929 us_cts = tcp_get_usecs(NULL);
13930 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
13932 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
13935 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
13937 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT);
13939 if (late && (did_out == 0)) {
13941 * We are late in the sending
13942 * and we did not call the output
13943 * (this probably should not happen).
13944 */
13945 goto do_output_now;
13947 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
13950 } else if (nxt_pkt == 0) {
13951 /* Do we have the correct timer running? */
13952 rack_timer_audit(tp, rack, &so->so_snd);
13956 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs));
13958 rack->r_wanted_output = 0;
13960 if (tp->t_inpcb == NULL) {
13961 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d",
13963 retval, tp, prev_state);
13966 #ifdef TCP_ACCOUNTING
13968 /*
13969 * Track the time (see above).
13970 */
13971 if (ack_val_set != 0xf) {
13974 crtsc = get_cyclecount();
13975 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val));
13977 * Note we *DO NOT* increment the per-tcb counters since
13978 * in the else the TP may be gone!!
13983 #ifdef TCP_ACCOUNTING
13990 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
13991 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos)
13995 /* First lets see if we have old packets */
13996 if (tp->t_in_pkt) {
13997 if (ctf_do_queued_segments(so, tp, 1)) {
14002 if (m->m_flags & M_TSTMP_LRO) {
14003 tv.tv_sec = m->m_pkthdr.rcv_tstmp /1000000000;
14004 tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000)/1000;
14005 } else {
14006 /* Should not happen; should we KASSERT instead? */
14007 tcp_get_usecs(&tv);
14008 }
14009 if (rack_do_segment_nounlock(m, th, so, tp,
14010 drop_hdrlen, tlen, iptos, 0, &tv) == 0) {
14011 INP_WUNLOCK(tp->t_inpcb);
14015 struct rack_sendmap *
14016 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused)
14018 struct rack_sendmap *rsm = NULL;
14020 uint32_t srtt = 0, thresh = 0, ts_low = 0;
14022 /* Return the next guy to be re-transmitted */
14023 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) {
14026 if (tp->t_flags & TF_SENTFIN) {
14027 /* retran the end FIN? */
14030 /* ok lets look at this one */
14031 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
14032 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) {
14035 rsm = rack_find_lowest_rsm(rack);
14040 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) &&
14041 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
14043 * No sack so we automatically do the 3 strikes and
14044 * retransmit (no rack timer would be started).
14049 if (rsm->r_flags & RACK_ACKED) {
14052 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) &&
14053 (rsm->r_dupack < DUP_ACK_THRESHOLD)) {
14054 /* It's not yet ready */
14057 srtt = rack_grab_rtt(tp, rack);
14058 idx = rsm->r_rtr_cnt - 1;
14059 ts_low = (uint32_t)rsm->r_tim_lastsent[idx];
14060 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
14061 if ((tsused == ts_low) ||
14062 (TSTMP_LT(tsused, ts_low))) {
14063 /* No time since sending */
14066 if ((tsused - ts_low) < thresh) {
14067 /* It has not been long enough yet */
14070 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
14071 ((rsm->r_flags & RACK_SACK_PASSED) &&
14072 (rack->sack_attack_disable == 0))) {
14074 * We have passed the dup-ack threshold <or>
14075 * a SACK has indicated this is missing.
14076 * Note that if you are a declared attacker
14077 * it is only the dup-ack threshold that
14078 * will cause retransmits.
14080 /* log retransmit reason */
14081 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1);
14082 rack->r_fast_output = 0;
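/*
 * Illustrative timing (annotation, not original source): if the rsm
 * was last sent at ts_low and rack_calc_thresh_rack() yields
 * thresh = 50000 usec, then with tsused - ts_low = 60000 the time
 * checks above pass and the rsm is returned here, provided it has
 * also seen DUP_ACK_THRESHOLD dup-acks or been passed by a SACK.
 */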
14089 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
14090 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method,
14091 int line, struct rack_sendmap *rsm)
14093 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
14094 union tcp_log_stackspecific log;
14097 memset(&log, 0, sizeof(log));
14098 log.u_bbr.flex1 = slot;
14099 log.u_bbr.flex2 = len;
14100 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs;
14101 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs;
14102 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss;
14103 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca;
14104 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data;
14105 log.u_bbr.use_lt_bw <<= 1;
14106 log.u_bbr.use_lt_bw |= rack->r_late;
14107 log.u_bbr.use_lt_bw <<= 1;
14108 log.u_bbr.use_lt_bw |= rack->r_early;
14109 log.u_bbr.use_lt_bw <<= 1;
14110 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
14111 log.u_bbr.use_lt_bw <<= 1;
14112 log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
14113 log.u_bbr.use_lt_bw <<= 1;
14114 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
14115 log.u_bbr.use_lt_bw <<= 1;
14116 log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
14117 log.u_bbr.use_lt_bw <<= 1;
14118 log.u_bbr.use_lt_bw |= rack->gp_ready;
14119 log.u_bbr.pkt_epoch = line;
14120 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed;
14121 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early;
14122 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec;
14123 log.u_bbr.bw_inuse = bw_est;
14124 log.u_bbr.delRate = bw;
14125 if (rack->r_ctl.gp_bw == 0)
14126 log.u_bbr.cur_del_rate = 0;
14128 log.u_bbr.cur_del_rate = rack_get_bw(rack);
14129 log.u_bbr.rttProp = len_time;
14130 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt;
14131 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit;
14132 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
14133 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) {
14134 /* We are in slow start */
14135 log.u_bbr.flex7 = 1;
14137 /* we are on congestion avoidance */
14138 log.u_bbr.flex7 = 0;
14140 log.u_bbr.flex8 = method;
14141 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
14142 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
14143 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec;
14144 log.u_bbr.cwnd_gain <<= 1;
14145 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
14146 log.u_bbr.cwnd_gain <<= 1;
14147 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
14148 TCP_LOG_EVENTP(rack->rc_tp, NULL,
14149 &rack->rc_inp->inp_socket->so_rcv,
14150 &rack->rc_inp->inp_socket->so_snd,
14151 BBR_LOG_HPTSI_CALC, 0,
14152 0, &log, false, &tv);
14157 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss)
14159 uint32_t new_tso, user_max;
14161 user_max = rack->rc_user_set_max_segs * mss;
14162 if (rack->rc_force_max_seg) {
14165 if (rack->use_fixed_rate &&
14166 ((rack->r_ctl.crte == NULL) ||
14167 (bw != rack->r_ctl.crte->rate))) {
14168 /* Use the user mss since we are not exactly matched */
14171 new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL);
14172 if (new_tso > user_max)
14173 new_tso = user_max;
14178 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced)
14180 uint64_t lentim, fill_bw;
14182 /* Lets first see if we are full, if so continue with normal rate */
14183 rack->r_via_fill_cw = 0;
14184 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use)
14185 return (slot);
14186 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd)
14187 return (slot);
14188 if (rack->r_ctl.rc_last_us_rtt == 0)
14189 return (slot);
14190 if (rack->rc_pace_fill_if_rttin_range &&
14191 (rack->r_ctl.rc_last_us_rtt >=
14192 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) {
14193 /* The rtt is huge, N * smallest, lets not fill */
14194 return (slot);
14195 }
14196 /*
14197 * first lets calculate the b/w based on the last us-rtt
14198 * and the smaller of our cwnd and rwnd.
14199 */
14200 fill_bw = rack->r_ctl.cwnd_to_use;
14201 /* Take the rwnd if its smaller */
14202 if (fill_bw > rack->rc_tp->snd_wnd)
14203 fill_bw = rack->rc_tp->snd_wnd;
14204 if (rack->r_fill_less_agg) {
14206 * Now take away the inflight (this will reduce our
14207 * aggressiveness and yeah, if we get that much out in 1RTT
14208 * we will have had acks come back and still be behind).
14210 fill_bw -= ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
14212 /* Now lets make it into a b/w */
14213 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC;
14214 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt;
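/*
 * Illustrative arithmetic (annotation, not original source): with
 * 500000 bytes of window left to fill and rc_last_us_rtt = 40000 usec,
 * fill_bw = 500000 * 1000000 / 40000 = 12500000 bytes/sec (12.5 MB/s).
 */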
14215 if (non_paced)
14216 *rate_wanted = fill_bw;
14217 /* Are we below the min b/w or the rate already wanted? */
14218 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted))
14219 return (slot);
14220 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap))
14221 fill_bw = rack->r_ctl.bw_rate_cap;
14222 rack->r_via_fill_cw = 1;
14223 if (rack->r_rack_hw_rate_caps &&
14224 (rack->r_ctl.crte != NULL)) {
14225 uint64_t high_rate;
14227 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte);
14228 if (fill_bw > high_rate) {
14229 /* We are capping bw at the highest rate table entry */
14230 if (*rate_wanted > high_rate) {
14231 /* The original rate was also capped */
14232 rack->r_via_fill_cw = 0;
14234 rack_log_hdwr_pacing(rack,
14235 fill_bw, high_rate, __LINE__,
14237 fill_bw = high_rate;
14241 } else if ((rack->r_ctl.crte == NULL) &&
14242 (rack->rack_hdrw_pacing == 0) &&
14243 (rack->rack_hdw_pace_ena) &&
14244 rack->r_rack_hw_rate_caps &&
14245 (rack->rack_attempt_hdwr_pace == 0) &&
14246 (rack->rc_inp->inp_route.ro_nh != NULL) &&
14247 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
14249 * Ok we may have a first attempt that is greater than our top rate
14250 * lets check.
14251 */
14252 uint64_t high_rate;
14254 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp);
14256 if (fill_bw > high_rate) {
14257 fill_bw = high_rate;
14263 /*
14264 * Ok fill_bw holds our mythical b/w to fill the cwnd
14265 * in a rtt, what does that time wise equate to?
14266 */
14267 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC;
14268 lentim /= fill_bw;
14269 *rate_wanted = fill_bw;
14270 if (non_paced || (lentim < slot)) {
14271 rack_log_pacing_delay_calc(rack, len, slot, fill_bw,
14272 0, lentim, 12, __LINE__, NULL);
14273 return ((int32_t)lentim);
14279 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz)
14281 struct rack_sendmap *lrsm;
14283 int can_start_hw_pacing = 1;
14286 if (rack->rc_always_pace == 0) {
14287 /*
14288 * We use the most optimistic possible cwnd/srtt for
14289 * sending calculations. This will make our
14290 * calculation anticipate getting more through
14291 * quicker than possible. But that's ok we don't want
14292 * the peer to have a gap in data sending.
14293 */
14294 uint32_t srtt, cwnd, tr_perms = 0;
14295 int32_t reduce = 0;
14299 * We keep no precise pacing with the old method
14300 * instead we use the pacer to mitigate bursts.
14302 if (rack->r_ctl.rc_rack_min_rtt)
14303 srtt = rack->r_ctl.rc_rack_min_rtt;
14305 srtt = max(tp->t_srtt, 1);
14306 if (rack->r_ctl.rc_rack_largest_cwnd)
14307 cwnd = rack->r_ctl.rc_rack_largest_cwnd;
14309 cwnd = rack->r_ctl.cwnd_to_use;
14310 /* Inflate cwnd by 1000 so srtt of usecs is in ms */
14311 tr_perms = (cwnd * 1000) / srtt;
14312 if (tr_perms == 0) {
14313 tr_perms = ctf_fixed_maxseg(tp);
14316 * Calculate how long this will take to drain, if
14317 * the calculation comes out to zero, that's ok we
14318 * will use send_a_lot to possibly spin around for
14319 * more increasing tot_len_this_send to the point
14320 * that it's going to require a pace, or we hit the
14321 * cwnd. Which in that case we are just waiting for
14322 * an ACK.
14323 */
14324 slot = len / tr_perms;
14325 /* Now do we reduce the time so we don't run dry? */
14326 if (slot && rack_slot_reduction) {
14327 reduce = (slot / rack_slot_reduction);
14328 if (reduce < slot) {
14333 slot *= HPTS_USEC_IN_MSEC;
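/*
 * Illustrative arithmetic (annotation, not original source): with
 * cwnd = 100000 bytes and srtt = 50000 usec, tr_perms =
 * (100000 * 1000) / 50000 = 2000 bytes per msec. A 14480-byte send
 * then drains in slot = 14480 / 2000 = 7 msec, which the conversion
 * above turns into 7000 usec.
 */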
14335 /*
14336 * We always consider ourselves app limited with old style sends
14337 * that are not retransmits. This could be the initial
14338 * measurement, but that's ok, it's all setup and specially
14339 * handled. If another send leaks out, then that too will
14340 * be marked app-limited.
14341 */
14342 lrsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
14343 if (lrsm && ((lrsm->r_flags & RACK_APP_LIMITED) == 0)) {
14344 rack->r_ctl.rc_first_appl = lrsm;
14345 lrsm->r_flags |= RACK_APP_LIMITED;
14346 rack->r_ctl.rc_app_limited_cnt++;
14349 if (rack->rc_pace_to_cwnd) {
14350 uint64_t rate_wanted = 0;
14352 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1);
14353 rack->rc_ack_can_sendout_data = 1;
14354 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL);
14356 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL);
14358 uint64_t bw_est, res, lentim, rate_wanted;
14359 uint32_t orig_val, srtt, segs, oh;
14363 if ((rack->r_rr_config == 1) && rsm) {
14364 return (rack->r_ctl.rc_min_to);
14366 if (rack->use_fixed_rate) {
14367 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack);
14368 } else if ((rack->r_ctl.init_rate == 0) &&
14369 #ifdef NETFLIX_PEAKRATE
14370 (rack->rc_tp->t_maxpeakrate == 0) &&
14372 (rack->r_ctl.gp_bw == 0)) {
14373 /* no way to yet do an estimate */
14374 bw_est = rate_wanted = 0;
14376 bw_est = rack_get_bw(rack);
14377 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped);
14379 if ((bw_est == 0) || (rate_wanted == 0) ||
14380 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) {
14382 * No way yet to make a b/w estimate or
14383 * our raise is set incorrectly.
14387 /* We need to account for all the overheads */
14388 segs = (len + segsiz - 1) / segsiz;
14390 * We need the diff between 1514 bytes (e-mtu with e-hdr)
14391 * and how much data we put in each packet. Yes this
14392 * means we may be off if we are larger than 1500 bytes
14393 * or smaller. But this just makes us more conservative.
14395 if (rack_hw_rate_min &&
14396 (bw_est < rack_hw_rate_min))
14397 can_start_hw_pacing = 0;
14398 if (ETHERNET_SEGMENT_SIZE > segsiz)
14399 oh = ETHERNET_SEGMENT_SIZE - segsiz;
14400 else
14401 oh = 0;
14402 segs *= oh;
14403 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC;
14404 res = lentim / rate_wanted;
14405 slot = (uint32_t)res;
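/*
 * Illustrative arithmetic (annotation, not original source): sending
 * len = 14480 bytes with segsiz = 1448 gives segs = 10, and with
 * oh = 1514 - 1448 = 66 bytes of per-packet overhead the padded
 * length is 14480 + 660 = 15140 bytes. At rate_wanted = 1250000
 * bytes/sec (10 Mbps) that works out to slot = 15140 * 1000000 /
 * 1250000 = 12112 usec of pacing delay.
 */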
14406 orig_val = rack->r_ctl.rc_pace_max_segs;
14407 if (rack->r_ctl.crte == NULL) {
14409 * Only do this if we are not hardware pacing
14410 * since if we are doing hw-pacing below we will
14411 * make a call after setting up or changing
14412 * the rate.
14413 */
14414 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
14415 } else if (rack->rc_inp->inp_snd_tag == NULL) {
14417 * We lost our rate somehow, this can happen
14418 * if the interface changed underneath us.
14420 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
14421 rack->r_ctl.crte = NULL;
14422 /* Lets re-allow attempting to setup pacing */
14423 rack->rack_hdrw_pacing = 0;
14424 rack->rack_attempt_hdwr_pace = 0;
14425 rack_log_hdwr_pacing(rack,
14426 rate_wanted, bw_est, __LINE__,
14429 /* Did we change the TSO size, if so log it */
14430 if (rack->r_ctl.rc_pace_max_segs != orig_val)
14431 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL);
14432 prev_fill = rack->r_via_fill_cw;
14433 if ((rack->rc_pace_to_cwnd) &&
14435 (rack->use_fixed_rate == 0) &&
14436 (rack->in_probe_rtt == 0) &&
14437 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) {
14439 * We want to pace at our rate *or* faster to
14440 * fill the cwnd to the max if it's not full.
14441 */
14442 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0);
14444 if ((rack->rc_inp->inp_route.ro_nh != NULL) &&
14445 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
14446 if ((rack->rack_hdw_pace_ena) &&
14447 (can_start_hw_pacing > 0) &&
14448 (rack->rack_hdrw_pacing == 0) &&
14449 (rack->rack_attempt_hdwr_pace == 0)) {
14451 * Lets attempt to turn on hardware pacing
14452 * if we can.
14453 */
14454 rack->rack_attempt_hdwr_pace = 1;
14455 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp,
14456 rack->rc_inp->inp_route.ro_nh->nh_ifp,
14459 &err, &rack->r_ctl.crte_prev_rate);
14460 if (rack->r_ctl.crte) {
14461 rack->rack_hdrw_pacing = 1;
14462 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, segsiz,
14463 0, rack->r_ctl.crte,
14465 rack_log_hdwr_pacing(rack,
14466 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
14468 rack->r_ctl.last_hw_bw_req = rate_wanted;
14470 counter_u64_add(rack_hw_pace_init_fail, 1);
14472 } else if (rack->rack_hdrw_pacing &&
14473 (rack->r_ctl.last_hw_bw_req != rate_wanted)) {
14474 /* Do we need to adjust our rate? */
14475 const struct tcp_hwrate_limit_table *nrte;
14477 if (rack->r_up_only &&
14478 (rate_wanted < rack->r_ctl.crte->rate)) {
14480 * We have four possible states here
14481 * having to do with the previous time
14482 * and this time.
14483 * previous | this-time
14484 * A) 0 | 0 -- fill_cw not in the picture
14485 * B) 1 | 0 -- we were doing a fill-cw but now are not
14486 * C) 1 | 1 -- all rates from fill_cw
14487 * D) 0 | 1 -- we were doing non-fill and now we are filling
14489 * For case A, C and D we don't allow a drop. But for
14490 * case B where we are now on our steady rate we do
14491 * allow a drop.
14492 */
14494 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0)))
14497 if ((rate_wanted > rack->r_ctl.crte->rate) ||
14498 (rate_wanted <= rack->r_ctl.crte_prev_rate)) {
14499 if (rack_hw_rate_to_low &&
14500 (bw_est < rack_hw_rate_to_low)) {
14502 * The pacing rate is too low for hardware, but
14503 * do allow hardware pacing to be restarted.
14505 rack_log_hdwr_pacing(rack,
14506 bw_est, rack->r_ctl.crte->rate, __LINE__,
14508 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
14509 rack->r_ctl.crte = NULL;
14510 rack->rack_attempt_hdwr_pace = 0;
14511 rack->rack_hdrw_pacing = 0;
14512 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
14515 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte,
14517 rack->rc_inp->inp_route.ro_nh->nh_ifp,
14520 &err, &rack->r_ctl.crte_prev_rate);
14521 if (nrte == NULL) {
14522 /* Lost the rate */
14523 rack->rack_hdrw_pacing = 0;
14524 rack->r_ctl.crte = NULL;
14525 rack_log_hdwr_pacing(rack,
14526 rate_wanted, 0, __LINE__,
14528 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
14529 counter_u64_add(rack_hw_pace_lost, 1);
14530 } else if (nrte != rack->r_ctl.crte) {
14531 rack->r_ctl.crte = nrte;
14532 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted,
14536 rack_log_hdwr_pacing(rack,
14537 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
14539 rack->r_ctl.last_hw_bw_req = rate_wanted;
14542 /* We just need to adjust the segment size */
14543 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
14544 rack_log_hdwr_pacing(rack,
14545 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
14547 rack->r_ctl.last_hw_bw_req = rate_wanted;
14551 if ((rack->r_ctl.crte != NULL) &&
14552 (rack->r_ctl.crte->rate == rate_wanted)) {
14554 * We need to add an extra if the rates
14555 * are exactly matched. The idea is
14556 * we want the software to make sure the
14557 * queue is empty before adding more, this
14558 * gives us N MSS extra pace times where
14559 * N is controlled by rack_hw_pace_extra_slots.
14560 */
14561 slot += (rack->r_ctl.crte->time_between * rack_hw_pace_extra_slots);
14564 if (rack_limit_time_with_srtt &&
14565 (rack->use_fixed_rate == 0) &&
14566 #ifdef NETFLIX_PEAKRATE
14567 (rack->rc_tp->t_maxpeakrate == 0) &&
14569 (rack->rack_hdrw_pacing == 0)) {
14571 * Sanity check, we do not allow the pacing delay
14572 * to be longer than the SRTT of the path. If it is
14573 * a slow path, then adding a packet should increase
14574 * the RTT and compensate for this i.e. the srtt will
14575 * be greater so the allowed pacing time will be greater.
14577 * Note this restriction is not for where a peak rate
14578 * is set, we are doing fixed pacing or hardware pacing.
14580 if (rack->rc_tp->t_srtt)
14581 srtt = rack->rc_tp->t_srtt;
14583 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */
14585 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL);
14589 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm);
14591 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) {
14593 * If this rate is seeing enobufs when it
14594 * goes to send then either the nic is out
14595 * of gas or we are mis-estimating the time
14596 * somehow and not letting the queue empty
14597 * completely. Lets add to the pacing time.
14599 int hw_boost_delay;
14601 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult;
14602 if (hw_boost_delay > rack_enobuf_hw_max)
14603 hw_boost_delay = rack_enobuf_hw_max;
14604 else if (hw_boost_delay < rack_enobuf_hw_min)
14605 hw_boost_delay = rack_enobuf_hw_min;
14606 slot += hw_boost_delay;
14609 counter_u64_add(rack_calc_nonzero, 1);
14611 counter_u64_add(rack_calc_zero, 1);
14616 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack,
14617 tcp_seq startseq, uint32_t sb_offset)
14619 struct rack_sendmap *my_rsm = NULL;
14620 struct rack_sendmap fe;
14622 if (tp->t_state < TCPS_ESTABLISHED) {
14624 * We don't start any measurements if we are
14625 * not at least established.
14629 tp->t_flags |= TF_GPUTINPROG;
14630 rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
14631 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
14632 tp->gput_seq = startseq;
14633 rack->app_limited_needs_set = 0;
14634 if (rack->in_probe_rtt)
14635 rack->measure_saw_probe_rtt = 1;
14636 else if ((rack->measure_saw_probe_rtt) &&
14637 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
14638 rack->measure_saw_probe_rtt = 0;
14639 if (rack->rc_gp_filled)
14640 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
14641 else {
14642 /* Special case initial measurement */
14645 tp->gput_ts = tcp_get_usecs(&tv);
14646 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
14649 * We take a guess out into the future,
14650 * if we have no measurement and no
14651 * initial rate, we measure the first
14652 * initial-window's worth of data to
14653 * speed up getting some GP measurement and
14654 * thus start pacing.
14656 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) {
14657 rack->app_limited_needs_set = 1;
14658 tp->gput_ack = startseq + max(rc_init_window(rack),
14659 (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
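/*
 * Illustrative arithmetic (annotation, not original source; assumes a
 * 10-segment initial window and a MIN_GP_WIN of 6): with a 1448-byte
 * maxseg, gput_ack = startseq + max(14480, 6 * 1448) = startseq +
 * 14480, so the first goodput sample spans one initial window.
 */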
14660 rack_log_pacing_delay_calc(rack,
14665 rack->r_ctl.rc_app_limited_cnt,
14672 * We are out somewhere in the sb
14673 * can we use the already outstanding data?
14676 if (rack->r_ctl.rc_app_limited_cnt == 0) {
14678 * Yes first one is good and in this case
14679 * the tp->gput_ts is correctly set based on
14680 * the last ack that arrived (no need to
14681 * set things up when an ack comes in).
14683 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
14684 if ((my_rsm == NULL) ||
14685 (my_rsm->r_rtr_cnt != 1)) {
14686 /* retransmission? */
14690 if (rack->r_ctl.rc_first_appl == NULL) {
14692 * If rc_first_appl is NULL
14693 * then the cnt should be 0.
14694 * This is probably an error, maybe
14695 * a KASSERT would be appropriate.
14700 * If we have a marker pointer to the last one that is
14701 * app limited we can use that, but we need to set
14702 * things up so that when it gets ack'ed we record
14703 * the ack time (if its not already acked).
14705 rack->app_limited_needs_set = 1;
14707 * We want to get to the rsm that is either
14708 * next with space i.e. over 1 MSS or the one
14709 * after that (after the app-limited).
14711 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
14712 rack->r_ctl.rc_first_appl);
14714 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp))
14715 /* Have to use the next one */
14716 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
14719 /* Use after the first MSS of it is acked */
14720 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp);
14724 if ((my_rsm == NULL) ||
14725 (my_rsm->r_rtr_cnt != 1)) {
14727 * Either it's a retransmit or
14728 * the last is the app-limited one.
14733 tp->gput_seq = my_rsm->r_start;
14735 if (my_rsm->r_flags & RACK_ACKED) {
14737 * This one has been acked use the arrival ack time
14739 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
14740 rack->app_limited_needs_set = 0;
14742 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)];
14743 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
14744 rack_log_pacing_delay_calc(rack,
14749 rack->r_ctl.rc_app_limited_cnt,
14757 * We don't know how long we may have been
14758 * idle or if this is the first-send. Lets
14759 * setup the flag so we will trim off
14760 * the first ack'd data so we get a true
14761 * measurement.
14762 */
14763 rack->app_limited_needs_set = 1;
14764 tp->gput_ack = startseq + rack_get_measure_window(tp, rack);
14765 /* Find this guy so we can pull the send time */
14766 fe.r_start = startseq;
14767 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
14769 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)];
14770 if (my_rsm->r_flags & RACK_ACKED) {
14772 * Unlikely since it's probably what was
14773 * just transmitted (but I am paranoid).
14775 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
14776 rack->app_limited_needs_set = 0;
14778 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) {
14779 /* This also is unlikely */
14780 tp->gput_seq = my_rsm->r_start;
14784 * TSNH unless we have some send-map limit,
14785 * and even at that it should not be hitting
14786 * that limit (we should have stopped sending).
14791 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
14793 rack_log_pacing_delay_calc(rack,
14798 rack->r_ctl.rc_app_limited_cnt,
14799 9, __LINE__, NULL);
14802 static inline uint32_t
14803 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use,
14804 uint32_t avail, int32_t sb_offset)
14809 if (tp->snd_wnd > cwnd_to_use)
14810 sendwin = cwnd_to_use;
14812 sendwin = tp->snd_wnd;
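/*
 * Illustrative example: cwnd_to_use of 20 segments but an rwnd
 * of 10 gives a sendwin of 10; with 6 in flight at most 4 more
 * may be sent, before the rwnd and sb clamps below.
 */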
14813 if (ctf_outstanding(tp) >= tp->snd_wnd) {
14814 /* We never want to go over our peer's rcv-window */
14819 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
14820 if (flight >= sendwin) {
14822 * We have in flight what we are allowed by cwnd (if
14823 * it was rwnd blocking, it would have been caught above).
14828 len = sendwin - flight;
14829 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) {
14830 /* We would send too much (beyond the rwnd) */
14831 len = tp->snd_wnd - ctf_outstanding(tp);
14833 if ((len + sb_offset) > avail) {
14835 * We don't have that much in the SB; send what is actually there.
14838 len = avail - sb_offset;
14845 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags,
14846 unsigned ipoptlen, int32_t orig_len, int32_t len, int error,
14847 int rsm_is_null, int optlen, int line, uint16_t mode)
14849 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
14850 union tcp_log_stackspecific log;
14853 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
14854 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
14855 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
14856 log.u_bbr.flex1 = error;
14857 log.u_bbr.flex2 = flags;
14858 log.u_bbr.flex3 = rsm_is_null;
14859 log.u_bbr.flex4 = ipoptlen;
14860 log.u_bbr.flex5 = tp->rcv_numsacks;
14861 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
14862 log.u_bbr.flex7 = optlen;
14863 log.u_bbr.flex8 = rack->r_fsb_inited;
14864 log.u_bbr.applimited = rack->r_fast_output;
14865 log.u_bbr.bw_inuse = rack_get_bw(rack);
14866 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
14867 log.u_bbr.cwnd_gain = mode;
14868 log.u_bbr.pkts_out = orig_len;
14869 log.u_bbr.lt_epoch = len;
14870 log.u_bbr.delivered = line;
14871 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
14872 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
14873 tcp_log_event_(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0,
14874 len, &log, false, NULL, NULL, 0, &tv);
14879 static struct mbuf *
14880 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen,
14881 struct rack_fast_send_blk *fsb,
14882 int32_t seglimit, int32_t segsize, int hw_tls)
14885 struct ktls_session *tls, *ntls;
14886 struct mbuf *start;
14888 struct mbuf *m, *n, **np, *smb;
14891 int32_t len = *plen;
14893 int32_t len_cp = 0;
14894 uint32_t mlen, frags;
14896 soff = off = the_off;
14901 if (hw_tls && (m->m_flags & M_EXTPG))
14902 tls = m->m_epg_tls;
14914 if (m->m_flags & M_EXTPG)
14915 ntls = m->m_epg_tls;
14920 * Avoid mixing TLS records with handshake
14921 * data or TLS records from different sessions.
14931 mlen = min(len, m->m_len - off);
14934 * For M_EXTPG mbufs, add 3 segments
14935 * + 1 in case we are crossing page boundaries
14936 * + 2 in case the TLS hdr/trailer are used
14937 * It is cheaper to just add the segments
14938 * than it is to take the cache miss to look
14939 * at the mbuf ext_pgs state in detail.
14941 if (m->m_flags & M_EXTPG) {
14942 fragsize = min(segsize, PAGE_SIZE);
14945 fragsize = segsize;
14949 /* Break if we really can't fit anymore. */
14950 if ((frags + 1) >= seglimit) {
14956 * Reduce the size if we can't copy the whole
14957 * mbuf, and adjust len so the loop will
14958 * end after this mbuf.
14961 if ((frags + howmany(mlen, fragsize)) >= seglimit) {
14962 mlen = (seglimit - frags - 1) * fragsize;
14964 *plen = len_cp + len;
14966 frags += howmany(mlen, fragsize);
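/*
 * e.g. (assumed sizes) a segsize of 1448 yields fragsize 1448
 * even for M_EXTPG (min with PAGE_SIZE), so an mlen of 4096
 * charges howmany(4096, 1448) = 3 fragments against seglimit.
 */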
14970 KASSERT(seglimit > 0,
14971 ("%s: seglimit went too low", __func__));
14973 n = m_get(M_NOWAIT, m->m_type);
14979 len_cp += n->m_len;
14980 if (m->m_flags & (M_EXT|M_EXTPG)) {
14981 n->m_data = m->m_data + off;
14984 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
14991 if (len || (soff == smb->m_len)) {
14993 * We have more so we move forward, or
14994 * we have consumed the entire mbuf and
14995 * len has fallen to 0.
15007 * Save off the size of the mbuf. We do
15008 * this so that we can recognize when it
15009 * has been trimmed by sbcut() as acks arrive.
15012 fsb->o_m_len = smb->m_len;
15015 * This is the case where the next mbuf went to NULL. This
15016 * means with this copy we have sent everything in the sb.
15017 * In theory we could clear the fast_output flag, but let's
15018 * not, since it's possible that we could get more added
15019 * and acks that call the extend function, which would let us keep going.
15034 * This is a copy of m_copym(), taking the TSO segment size/limit
15035 * constraints into account, and advancing the sndptr as it goes.
15037 static struct mbuf *
15038 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen,
15039 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff)
15041 struct mbuf *m, *n;
15044 soff = rack->r_ctl.fsb.off;
15045 m = rack->r_ctl.fsb.m;
15046 if (rack->r_ctl.fsb.o_m_len != m->m_len) {
15048 * The mbuf had the front of it chopped off by an ack
15049 * we need to adjust the soff/off by that difference.
15053 delta = rack->r_ctl.fsb.o_m_len - m->m_len;
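/*
 * e.g. if the mbuf originally held 2048 bytes (o_m_len) and acks
 * let sbcut() trim 512 off the front, delta is 512 and the saved
 * soff/len bookkeeping is shifted back by that amount.
 */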
15056 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff));
15057 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen));
15058 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?",
15060 rack, *plen, m, m->m_len));
15061 /* Save off the right location before we copy and advance */
15063 *s_mb = rack->r_ctl.fsb.m;
15064 n = rack_fo_base_copym(m, soff, plen,
15066 seglimit, segsize, rack->r_ctl.fsb.hw_tls);
15071 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
15072 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len)
15075 * Enter the fast retransmit path. We are given that a sched_pin is
15076 * in place (if accounting is compiled in) and the cycle count taken
15077 * at the entry is in ts_val. The concept here is that the rsm
15078 * now holds the mbuf offsets and such, so we can directly transmit
15079 * without a lot of overhead; the len field is already set for
15080 * us to prohibit us from sending too much (usually it's 1 MSS).
15082 struct ip *ip = NULL;
15083 struct udphdr *udp = NULL;
15084 struct tcphdr *th = NULL;
15085 struct mbuf *m = NULL;
15088 struct tcp_log_buffer *lgb;
15089 #ifdef TCP_ACCOUNTING
15095 u_char opt[TCP_MAXOLEN];
15096 uint32_t hdrlen, optlen;
15097 int32_t slot, segsiz, max_val, tso = 0, error, flags, ulen = 0;
15099 uint32_t if_hw_tsomaxsegcount = 0, startseq;
15100 uint32_t if_hw_tsomaxsegsize;
15103 struct ip6_hdr *ip6 = NULL;
15105 if (rack->r_is_v6) {
15106 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
15107 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
15111 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
15112 hdrlen = sizeof(struct tcpiphdr);
15114 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
15117 if (rsm->r_flags & RACK_TLP)
15119 startseq = rsm->r_start;
15120 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
15121 inp = rack->rc_inp;
15123 flags = tcp_outflags[tp->t_state];
15124 if (flags & (TH_SYN|TH_RST)) {
15127 if (rsm->r_flags & RACK_HAS_FIN) {
15128 /* We can't send a FIN here */
15131 if (flags & TH_FIN) {
15132 /* We never send a FIN */
15135 if (tp->t_flags & TF_RCVD_TSTMP) {
15136 to.to_tsval = ms_cts + tp->ts_offset;
15137 to.to_tsecr = tp->ts_recent;
15138 to.to_flags = TOF_TS;
15140 optlen = tcp_addoptions(&to, opt);
15142 udp = rack->r_ctl.fsb.udp;
15144 hdrlen += sizeof(struct udphdr);
15145 if (rack->r_ctl.rc_pace_max_segs)
15146 max_val = rack->r_ctl.rc_pace_max_segs;
15147 else if (rack->rc_user_set_max_segs)
15148 max_val = rack->rc_user_set_max_segs * segsiz;
15151 if ((tp->t_flags & TF_TSO) &&
15157 if (MHLEN < hdrlen + max_linkhdr)
15158 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
15161 m = m_gethdr(M_NOWAIT, MT_DATA);
15164 m->m_data += max_linkhdr;
15166 th = rack->r_ctl.fsb.th;
15167 /* Establish the len to send */
15170 if ((tso) && (len + optlen > tp->t_maxseg)) {
15171 uint32_t if_hw_tsomax;
15174 /* extract TSO information */
15175 if_hw_tsomax = tp->t_tsomax;
15176 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
15177 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
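/*
 * Illustrative: an if_hw_tsomax of 65535 minus ~72 bytes of
 * headers caps a TSO burst near 65463 bytes; len is clipped
 * below if it exceeds what the NIC advertises.
 */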
15179 * Check if we should limit by maximum payload
15182 if (if_hw_tsomax != 0) {
15183 /* compute maximum TSO length */
15184 max_len = (if_hw_tsomax - hdrlen -
15186 if (max_len <= 0) {
15188 } else if (len > max_len) {
15192 if (len <= segsiz) {
15194 * In case there are too many small fragments, don't use TSO.
15202 if ((tso == 0) && (len > segsiz))
15204 us_cts = tcp_get_usecs(tv);
15206 (len <= MHLEN - hdrlen - max_linkhdr)) {
15209 th->th_seq = htonl(rsm->r_start);
15210 th->th_ack = htonl(tp->rcv_nxt);
15212 * The PUSH bit should only be applied
15213 * if the full retransmission is made. If
15214 * we are sending less than that, this is the
15215 * left-hand edge and should not have PUSH set.
15218 if ((rsm->r_flags & RACK_HAD_PUSH) &&
15219 (len == (rsm->r_end - rsm->r_start)))
15221 th->th_flags = flags;
15222 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale));
15223 if (th->th_win == 0) {
15224 tp->t_sndzerowin++;
15225 tp->t_flags |= TF_RXWIN0SENT;
15227 tp->t_flags &= ~TF_RXWIN0SENT;
15228 if (rsm->r_flags & RACK_TLP) {
15230 * TLP should not count in retran count; it is accounted in its own counters below.
15233 counter_u64_add(rack_tlp_retran, 1);
15234 counter_u64_add(rack_tlp_retran_bytes, len);
15236 tp->t_sndrexmitpack++;
15237 KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
15238 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
15241 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
15244 if (rsm->m == NULL)
15246 if (rsm->orig_m_len != rsm->m->m_len) {
15247 /* Fix up the orig_m_len and possibly the mbuf offset */
15248 rack_adjust_orig_mlen(rsm);
15250 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls);
15251 if (len <= segsiz) {
15253 * Must have run out of mbufs for the copy;
15254 * shorten it so we no longer need TSO. Let's
15255 * not set sendalot since we are low on mbufs.
15260 if ((m->m_next == NULL) || (len <= 0)){
15265 ulen = hdrlen + len - sizeof(struct ip6_hdr);
15267 ulen = hdrlen + len - sizeof(struct ip);
15268 udp->uh_ulen = htons(ulen);
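/*
 * ulen counts everything past the IP header (UDP + TCP headers
 * plus payload), which is exactly what the UDP length field in
 * a tunneled segment must carry.
 */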
15270 m->m_pkthdr.rcvif = (struct ifnet *)0;
15271 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
15273 if (rack->r_is_v6) {
15275 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
15276 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
15277 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
15278 th->th_sum = htons(0);
15279 UDPSTAT_INC(udps_opackets);
15281 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
15282 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
15283 th->th_sum = in6_cksum_pseudo(ip6,
15284 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
15289 #if defined(INET6) && defined(INET)
15295 m->m_pkthdr.csum_flags = CSUM_UDP;
15296 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
15297 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
15298 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
15299 th->th_sum = htons(0);
15300 UDPSTAT_INC(udps_opackets);
15302 m->m_pkthdr.csum_flags = CSUM_TCP;
15303 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
15304 th->th_sum = in_pseudo(ip->ip_src.s_addr,
15305 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
15306 IPPROTO_TCP + len + optlen));
15308 /* IP version must be set here for ipv4/ipv6 checking later */
15309 KASSERT(ip->ip_v == IPVERSION,
15310 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
15314 KASSERT(len > tp->t_maxseg - optlen,
15315 ("%s: len <= tso_segsz tp:%p", __func__, tp));
15316 m->m_pkthdr.csum_flags |= CSUM_TSO;
15317 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
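/*
 * e.g. a 1460-byte t_maxseg with 12 bytes of timestamp options
 * gives a tso_segsz of 1448, leaving room for the options in
 * every segment the hardware will generate.
 */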
15320 if (rack->r_is_v6) {
15321 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit;
15322 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
15323 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
15324 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
15326 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
15329 #if defined(INET) && defined(INET6)
15334 ip->ip_len = htons(m->m_pkthdr.len);
15335 ip->ip_ttl = rack->r_ctl.fsb.hoplimit;
15336 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
15337 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
15338 if (tp->t_port == 0 || len < V_tcp_minmss) {
15339 ip->ip_off |= htons(IP_DF);
15342 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
15346 /* Time to copy in our header */
15347 cpto = mtod(m, uint8_t *);
15348 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
15349 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
15351 bcopy(opt, th + 1, optlen);
15352 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
15354 th->th_off = sizeof(struct tcphdr) >> 2;
15356 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
15357 union tcp_log_stackspecific log;
15359 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
15360 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
15361 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
15362 if (rack->rack_no_prr)
15363 log.u_bbr.flex1 = 0;
15365 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
15366 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
15367 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
15368 log.u_bbr.flex4 = max_val;
15369 log.u_bbr.flex5 = 0;
15370 /* Save off the early/late values */
15371 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
15372 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
15373 log.u_bbr.bw_inuse = rack_get_bw(rack);
15374 log.u_bbr.flex8 = 1;
15375 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
15376 log.u_bbr.flex7 = 55;
15377 log.u_bbr.pkts_out = tp->t_maxseg;
15378 log.u_bbr.timeStamp = cts;
15379 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
15380 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use;
15381 log.u_bbr.delivered = 0;
15382 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
15383 len, &log, false, NULL, NULL, 0, tv);
15387 if (rack->r_is_v6) {
15388 error = ip6_output(m, NULL,
15390 0, NULL, NULL, inp);
15393 #if defined(INET) && defined(INET6)
15398 error = ip_output(m, NULL,
15405 lgb->tlb_errno = error;
15411 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv),
15412 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls);
15413 if (doing_tlp && (rack->fast_rsm_hack == 0)) {
15414 rack->rc_tlp_in_progress = 1;
15415 rack->r_ctl.rc_tlp_cnt_out++;
15418 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls);
15419 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
15420 rack->forced_ack = 0; /* If we send something zap the FA flag */
15421 if (IN_FASTRECOVERY(tp->t_flags) && rsm)
15422 rack->r_ctl.retran_during_recovery += len;
15426 idx = (len / segsiz) + 3;
15427 if (idx >= TCP_MSS_ACCT_ATIMER)
15428 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
15430 counter_u64_add(rack_out_size[idx], 1);
15432 if (tp->t_rtttime == 0) {
15433 tp->t_rtttime = ticks;
15434 tp->t_rtseq = startseq;
15435 KMOD_TCPSTAT_INC(tcps_segstimed);
15437 counter_u64_add(rack_fto_rsm_send, 1);
15438 if (error && (error == ENOBUFS)) {
15439 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
15440 if (rack->rc_enobuf < 0x7f)
15442 if (slot < (10 * HPTS_USEC_IN_MSEC))
15443 slot = 10 * HPTS_USEC_IN_MSEC;
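/*
 * The ENOBUFS backoff grows by ~1 ms per consecutive failure
 * (rc_enobuf caps at 0x7f) but the retry slot is floored at
 * 10 ms, so we never hammer a driver that is out of buffers.
 */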
15445 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz);
15447 (rack->rc_always_pace == 0) ||
15448 (rack->r_rr_config == 1)) {
15450 * We have no pacing set or we
15451 * are using old-style rack or
15452 * we are overridden to use the old 1 ms pacing.
15454 slot = rack->r_ctl.rc_min_to;
15456 rack_start_hpts_timer(rack, tp, cts, slot, len, 0);
15457 if (rack->r_must_retran) {
15458 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
15459 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
15461 * We have retransmitted all we need.
15463 rack->r_must_retran = 0;
15464 rack->r_ctl.rc_out_at_rto = 0;
15467 #ifdef TCP_ACCOUNTING
15468 crtsc = get_cyclecount();
15469 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15470 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
15472 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru);
15473 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15474 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
15476 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val));
15477 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15478 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz);
15480 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((len + segsiz - 1) / segsiz));
15491 rack_sndbuf_autoscale(struct tcp_rack *rack)
15494 * Automatic sizing of send socket buffer. Often the send buffer
15495 * size is not optimally adjusted to the actual network conditions
15496 * at hand (delay bandwidth product). Setting the buffer size too
15497 * small limits throughput on links with high bandwidth and high
15498 * delay (eg. trans-continental/oceanic links). Setting the
15499 * buffer size too big consumes too much real kernel memory,
15500 * especially with many connections on busy servers.
15502 * The criteria to step up the send buffer one notch are:
15503 * 1. receive window of remote host is larger than send buffer
15504 * (with a fudge factor of 5/4th);
15505 * 2. send buffer is filled to 7/8th with data (so we actually
15506 * have data to make use of it);
15507 * 3. send buffer fill has not hit maximal automatic size;
15508 * 4. our send window (slow start and congestion controlled) is
15509 * larger than sent but unacknowledged data in send buffer.
15511 * Note that the rack version moves things much faster since
15512 * we want to avoid hitting cache lines in the rack_fast_output()
15513 * path so this is called much less often and thus moves
15514 * the SB forward by a percentage.
15518 uint32_t sendwin, scaleup;
15521 so = rack->rc_inp->inp_socket;
15522 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd);
15523 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
15524 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
15525 sbused(&so->so_snd) >=
15526 (so->so_snd.sb_hiwat / 8 * 7) &&
15527 sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
15528 sendwin >= (sbused(&so->so_snd) -
15529 (tp->snd_nxt - tp->snd_una))) {
15530 if (rack_autosndbuf_inc)
15531 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100;
15533 scaleup = V_tcp_autosndbuf_inc;
15534 if (scaleup < V_tcp_autosndbuf_inc)
15535 scaleup = V_tcp_autosndbuf_inc;
15536 scaleup += so->so_snd.sb_hiwat;
15537 if (scaleup > V_tcp_autosndbuf_max)
15538 scaleup = V_tcp_autosndbuf_max;
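/*
 * Example with assumed values: rack_autosndbuf_inc of 20 (%) on
 * a 64 KB sb_hiwat requests about 77 KB, clamped to
 * V_tcp_autosndbuf_max; if sbreserve_locked() fails below we
 * turn autosizing off.
 */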
15539 if (!sbreserve_locked(&so->so_snd, scaleup, so, curthread))
15540 so->so_snd.sb_flags &= ~SB_AUTOSIZE;
15546 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val,
15547 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err)
15550 * Enter to do fast output. We are given that the sched_pin is
15551 * in place (if accounting is compiled in) and the cycle count taken
15552 * at entry is in place in ts_val. The idea here is that
15553 * we know how many more bytes need to be sent (presumably either
15554 * during pacing or to fill the cwnd, and that was greater than
15555 * the max-burst). We have how much to send and all the info we
15556 * need to just send.
15558 struct ip *ip = NULL;
15559 struct udphdr *udp = NULL;
15560 struct tcphdr *th = NULL;
15561 struct mbuf *m, *s_mb;
15564 struct tcp_log_buffer *lgb;
15565 #ifdef TCP_ACCOUNTING
15569 u_char opt[TCP_MAXOLEN];
15570 uint32_t hdrlen, optlen;
15572 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, flags, ulen = 0;
15573 uint32_t us_cts, s_soff;
15574 uint32_t if_hw_tsomaxsegcount = 0, startseq;
15575 uint32_t if_hw_tsomaxsegsize;
15576 uint16_t add_flag = RACK_SENT_FP;
15578 struct ip6_hdr *ip6 = NULL;
15580 if (rack->r_is_v6) {
15581 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
15582 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
15586 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
15587 hdrlen = sizeof(struct tcpiphdr);
15589 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
15593 startseq = tp->snd_max;
15594 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
15595 inp = rack->rc_inp;
15596 len = rack->r_ctl.fsb.left_to_send;
15598 flags = rack->r_ctl.fsb.tcp_flags;
15599 if (tp->t_flags & TF_RCVD_TSTMP) {
15600 to.to_tsval = ms_cts + tp->ts_offset;
15601 to.to_tsecr = tp->ts_recent;
15602 to.to_flags = TOF_TS;
15604 optlen = tcp_addoptions(&to, opt);
15606 udp = rack->r_ctl.fsb.udp;
15608 hdrlen += sizeof(struct udphdr);
15609 if (rack->r_ctl.rc_pace_max_segs)
15610 max_val = rack->r_ctl.rc_pace_max_segs;
15611 else if (rack->rc_user_set_max_segs)
15612 max_val = rack->rc_user_set_max_segs * segsiz;
15615 if ((tp->t_flags & TF_TSO) &&
15622 if (MHLEN < hdrlen + max_linkhdr)
15623 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
15626 m = m_gethdr(M_NOWAIT, MT_DATA);
15629 m->m_data += max_linkhdr;
15631 th = rack->r_ctl.fsb.th;
15632 /* Establish the len to send */
15635 if ((tso) && (len + optlen > tp->t_maxseg)) {
15636 uint32_t if_hw_tsomax;
15639 /* extract TSO information */
15640 if_hw_tsomax = tp->t_tsomax;
15641 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
15642 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
15644 * Check if we should limit by maximum payload
15647 if (if_hw_tsomax != 0) {
15648 /* compute maximum TSO length */
15649 max_len = (if_hw_tsomax - hdrlen -
15651 if (max_len <= 0) {
15653 } else if (len > max_len) {
15657 if (len <= segsiz) {
15659 * In case there are too many small fragments, don't use TSO.
15667 if ((tso == 0) && (len > segsiz))
15669 us_cts = tcp_get_usecs(tv);
15671 (len <= MHLEN - hdrlen - max_linkhdr)) {
15674 sb_offset = tp->snd_max - tp->snd_una;
15675 th->th_seq = htonl(tp->snd_max);
15676 th->th_ack = htonl(tp->rcv_nxt);
15677 th->th_flags = flags;
15678 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale));
15679 if (th->th_win == 0) {
15680 tp->t_sndzerowin++;
15681 tp->t_flags |= TF_RXWIN0SENT;
15683 tp->t_flags &= ~TF_RXWIN0SENT;
15684 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */
15685 KMOD_TCPSTAT_INC(tcps_sndpack);
15686 KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
15688 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
15691 if (rack->r_ctl.fsb.m == NULL)
15694 /* s_mb and s_soff are saved for rack_log_output */
15695 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize,
15697 if (len <= segsiz) {
15699 * Must have run out of mbufs for the copy;
15700 * shorten it so we no longer need TSO. Let's
15701 * not set sendalot since we are low on mbufs.
15706 if (rack->r_ctl.fsb.rfo_apply_push &&
15707 (len == rack->r_ctl.fsb.left_to_send)) {
15708 th->th_flags |= TH_PUSH;
15709 add_flag |= RACK_HAD_PUSH;
15711 if ((m->m_next == NULL) || (len <= 0)){
15716 ulen = hdrlen + len - sizeof(struct ip6_hdr);
15718 ulen = hdrlen + len - sizeof(struct ip);
15719 udp->uh_ulen = htons(ulen);
15721 m->m_pkthdr.rcvif = (struct ifnet *)0;
15722 if (tp->t_state == TCPS_ESTABLISHED &&
15723 (tp->t_flags2 & TF2_ECN_PERMIT)) {
15725 * If the peer has ECN, mark data packets with ECN capable
15726 * transmission (ECT). Ignore pure ack packets and retransmissions.
15729 if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max)) {
15732 ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
15735 ip->ip_tos |= IPTOS_ECN_ECT0;
15736 KMOD_TCPSTAT_INC(tcps_ecn_ect0);
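/*
 * Per RFC 3168 only segments carrying new data get ECT(0); the
 * len/SEQ_GEQ guard above keeps pure acks and rexmits Not-ECT.
 */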
15738 * Reply with proper ECN notifications.
15739 * Only set CWR on new data segments.
15741 if (tp->t_flags2 & TF2_ECN_SND_CWR) {
15743 tp->t_flags2 &= ~TF2_ECN_SND_CWR;
15746 if (tp->t_flags2 & TF2_ECN_SND_ECE)
15749 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
15751 if (rack->r_is_v6) {
15753 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
15754 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
15755 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
15756 th->th_sum = htons(0);
15757 UDPSTAT_INC(udps_opackets);
15759 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
15760 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
15761 th->th_sum = in6_cksum_pseudo(ip6,
15762 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
15767 #if defined(INET6) && defined(INET)
15773 m->m_pkthdr.csum_flags = CSUM_UDP;
15774 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
15775 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
15776 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
15777 th->th_sum = htons(0);
15778 UDPSTAT_INC(udps_opackets);
15780 m->m_pkthdr.csum_flags = CSUM_TCP;
15781 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
15782 th->th_sum = in_pseudo(ip->ip_src.s_addr,
15783 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
15784 IPPROTO_TCP + len + optlen));
15786 /* IP version must be set here for ipv4/ipv6 checking later */
15787 KASSERT(ip->ip_v == IPVERSION,
15788 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
15792 KASSERT(len > tp->t_maxseg - optlen,
15793 ("%s: len <= tso_segsz tp:%p", __func__, tp));
15794 m->m_pkthdr.csum_flags |= CSUM_TSO;
15795 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
15798 if (rack->r_is_v6) {
15799 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit;
15800 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
15801 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
15802 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
15804 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
15807 #if defined(INET) && defined(INET6)
15812 ip->ip_len = htons(m->m_pkthdr.len);
15813 ip->ip_ttl = rack->r_ctl.fsb.hoplimit;
15814 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
15815 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
15816 if (tp->t_port == 0 || len < V_tcp_minmss) {
15817 ip->ip_off |= htons(IP_DF);
15820 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
15824 /* Time to copy in our header */
15825 cpto = mtod(m, uint8_t *);
15826 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
15827 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
15829 bcopy(opt, th + 1, optlen);
15830 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
15832 th->th_off = sizeof(struct tcphdr) >> 2;
15834 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
15835 union tcp_log_stackspecific log;
15837 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
15838 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
15839 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
15840 if (rack->rack_no_prr)
15841 log.u_bbr.flex1 = 0;
15843 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
15844 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
15845 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
15846 log.u_bbr.flex4 = max_val;
15847 log.u_bbr.flex5 = 0;
15848 /* Save off the early/late values */
15849 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
15850 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
15851 log.u_bbr.bw_inuse = rack_get_bw(rack);
15852 log.u_bbr.flex8 = 0;
15853 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
15854 log.u_bbr.flex7 = 44;
15855 log.u_bbr.pkts_out = tp->t_maxseg;
15856 log.u_bbr.timeStamp = cts;
15857 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
15858 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use;
15859 log.u_bbr.delivered = 0;
15860 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
15861 len, &log, false, NULL, NULL, 0, tv);
15865 if (rack->r_is_v6) {
15866 error = ip6_output(m, NULL,
15868 0, NULL, NULL, inp);
15871 #if defined(INET) && defined(INET6)
15876 error = ip_output(m, NULL,
15882 lgb->tlb_errno = error;
15890 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv),
15891 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls);
15893 if (tp->snd_una == tp->snd_max) {
15894 rack->r_ctl.rc_tlp_rxt_last_time = cts;
15895 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
15896 tp->t_acktime = ticks;
15899 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls);
15901 rack->forced_ack = 0; /* If we send something zap the FA flag */
15903 if ((tp->t_flags & TF_GPUTINPROG) == 0)
15904 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset);
15905 tp->snd_max += len;
15906 tp->snd_nxt = tp->snd_max;
15910 idx = (len / segsiz) + 3;
15911 if (idx >= TCP_MSS_ACCT_ATIMER)
15912 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
15914 counter_u64_add(rack_out_size[idx], 1);
15916 if (len <= rack->r_ctl.fsb.left_to_send)
15917 rack->r_ctl.fsb.left_to_send -= len;
15919 rack->r_ctl.fsb.left_to_send = 0;
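/*
 * e.g. a left_to_send of 4000 after a 2896-byte send leaves 1104;
 * once the residue drops under segsiz, fast output ends and the
 * sb may be autoscaled below.
 */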
15920 if (rack->r_ctl.fsb.left_to_send < segsiz) {
15921 rack->r_fast_output = 0;
15922 rack->r_ctl.fsb.left_to_send = 0;
15923 /* At the end of fast_output scale up the sb */
15924 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd);
15925 rack_sndbuf_autoscale(rack);
15926 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd);
15928 if (tp->t_rtttime == 0) {
15929 tp->t_rtttime = ticks;
15930 tp->t_rtseq = startseq;
15931 KMOD_TCPSTAT_INC(tcps_segstimed);
15933 if ((rack->r_ctl.fsb.left_to_send >= segsiz) &&
15938 th = rack->r_ctl.fsb.th;
15942 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
15943 counter_u64_add(rack_fto_send, 1);
15944 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz);
15945 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0);
15946 #ifdef TCP_ACCOUNTING
15947 crtsc = get_cyclecount();
15948 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15949 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
15951 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru);
15952 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15953 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
15955 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val));
15956 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15957 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz);
15959 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len + segsiz - 1) / segsiz));
15966 rack->r_fast_output = 0;
15971 rack_output(struct tcpcb *tp)
15975 uint32_t sb_offset, s_moff = 0;
15976 int32_t len, flags, error = 0;
15977 struct mbuf *m, *s_mb = NULL;
15979 uint32_t if_hw_tsomaxsegcount = 0;
15980 uint32_t if_hw_tsomaxsegsize;
15981 int32_t segsiz, minseg;
15982 long tot_len_this_send = 0;
15984 struct ip *ip = NULL;
15987 struct ipovly *ipov = NULL;
15989 struct udphdr *udp = NULL;
15990 struct tcp_rack *rack;
15994 uint8_t wanted_cookie = 0;
15995 u_char opt[TCP_MAXOLEN];
15996 unsigned ipoptlen, optlen, hdrlen, ulen=0;
15999 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
16000 unsigned ipsec_optlen = 0;
16003 int32_t idle, sendalot;
16004 int32_t sub_from_prr = 0;
16005 volatile int32_t sack_rxmit;
16006 struct rack_sendmap *rsm = NULL;
16010 int32_t sup_rack = 0;
16011 uint32_t cts, ms_cts, delayed, early;
16012 uint16_t add_flag = RACK_SENT_SP;
16013 uint8_t hpts_calling, doing_tlp = 0;
16014 uint32_t cwnd_to_use, pace_max_seg;
16015 int32_t do_a_prefetch = 0;
16016 int32_t prefetch_rsm = 0;
16017 int32_t orig_len = 0;
16019 int32_t prefetch_so_done = 0;
16020 struct tcp_log_buffer *lgb;
16022 struct sockbuf *sb;
16023 uint64_t ts_val = 0;
16024 #ifdef TCP_ACCOUNTING
16028 struct ip6_hdr *ip6 = NULL;
16031 uint8_t filled_all = 0;
16032 bool hw_tls = false;
16034 /* setup and take the cache hits here */
16035 rack = (struct tcp_rack *)tp->t_fb_ptr;
16036 #ifdef TCP_ACCOUNTING
16038 ts_val = get_cyclecount();
16040 hpts_calling = rack->rc_inp->inp_hpts_calls;
16041 NET_EPOCH_ASSERT();
16042 INP_WLOCK_ASSERT(rack->rc_inp);
16044 if (tp->t_flags & TF_TOE) {
16045 #ifdef TCP_ACCOUNTING
16048 return (tcp_offload_output(tp));
16052 * For TFO connections in SYN_RECEIVED, only allow the initial
16053 * SYN|ACK and those sent by the retransmit timer.
16055 if (IS_FASTOPEN(tp->t_flags) &&
16056 (tp->t_state == TCPS_SYN_RECEIVED) &&
16057 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */
16058 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */
16059 #ifdef TCP_ACCOUNTING
16065 if (rack->r_state) {
16066 /* Use the cache line loaded if possible */
16067 isipv6 = rack->r_is_v6;
16069 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0;
16073 cts = tcp_get_usecs(&tv);
16074 ms_cts = tcp_tv_to_mssectick(&tv);
16075 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
16076 rack->rc_inp->inp_in_hpts) {
16078 * We are on the hpts for some timer but not hptsi output.
16079 * Remove from the hpts unconditionally.
16081 rack_timer_cancel(tp, rack, cts, __LINE__);
16083 /* Are we pacing and late? */
16084 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
16085 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) {
16086 /* We are delayed */
16087 delayed = cts - rack->r_ctl.rc_last_output_to;
16091 /* Do the timers, which may override the pacer */
16092 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
16093 if (rack_process_timers(tp, rack, cts, hpts_calling)) {
16094 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1);
16095 #ifdef TCP_ACCOUNTING
16101 if (rack->rc_in_persist) {
16102 if (rack->rc_inp->inp_in_hpts == 0) {
16103 /* Timer is not running */
16104 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
16106 #ifdef TCP_ACCOUNTING
16111 if ((rack->r_timer_override) ||
16112 (rack->rc_ack_can_sendout_data) ||
16114 (tp->t_state < TCPS_ESTABLISHED)) {
16115 rack->rc_ack_can_sendout_data = 0;
16116 if (rack->rc_inp->inp_in_hpts)
16117 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
16118 } else if (rack->rc_inp->inp_in_hpts) {
16120 * While on the hpts we can't send even if ACKNOW is on; we
16121 * will when the hpts fires.
16123 #ifdef TCP_ACCOUNTING
16124 crtsc = get_cyclecount();
16125 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16126 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val);
16128 counter_u64_add(tcp_proc_time[SND_BLOCKED], (crtsc - ts_val));
16129 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16130 tp->tcp_cnt_counters[SND_BLOCKED]++;
16132 counter_u64_add(tcp_cnt_counters[SND_BLOCKED], 1);
16135 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1);
16138 rack->rc_inp->inp_hpts_calls = 0;
16139 /* Finish out both pacing early and late accounting */
16140 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
16141 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) {
16142 early = rack->r_ctl.rc_last_output_to - cts;
16146 rack->r_ctl.rc_agg_delayed += delayed;
16148 } else if (early) {
16149 rack->r_ctl.rc_agg_early += early;
16152 /* Now that early/late accounting is done turn off the flag */
16153 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
16154 rack->r_wanted_output = 0;
16155 rack->r_timer_override = 0;
16156 if ((tp->t_state != rack->r_state) &&
16157 TCPS_HAVEESTABLISHED(tp->t_state)) {
16158 rack_set_state(tp, rack);
16160 if ((rack->r_fast_output) &&
16161 (tp->rcv_numsacks == 0)) {
16165 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
16169 inp = rack->rc_inp;
16170 so = inp->inp_socket;
16175 inp = rack->rc_inp;
16177 * For TFO connections in SYN_SENT or SYN_RECEIVED,
16178 * only allow the initial SYN or SYN|ACK and those sent
16179 * by the retransmit timer.
16181 if (IS_FASTOPEN(tp->t_flags) &&
16182 ((tp->t_state == TCPS_SYN_RECEIVED) ||
16183 (tp->t_state == TCPS_SYN_SENT)) &&
16184 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */
16185 (tp->t_rxtshift == 0)) { /* not a retransmit */
16186 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
16187 so = inp->inp_socket;
16189 goto just_return_nolock;
16192 * Determine length of data that should be transmitted, and flags
16193 * that will be used. If there is some data or critical controls
16194 * (SYN, RST) to send, then transmit; otherwise, investigate
16197 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
16198 if (tp->t_idle_reduce) {
16199 if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur))
16200 rack_cc_after_idle(rack, tp);
16202 tp->t_flags &= ~TF_LASTIDLE;
16204 if (tp->t_flags & TF_MORETOCOME) {
16205 tp->t_flags |= TF_LASTIDLE;
16209 if ((tp->snd_una == tp->snd_max) &&
16210 rack->r_ctl.rc_went_idle_time &&
16211 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) {
16212 idle = cts - rack->r_ctl.rc_went_idle_time;
16213 if (idle > rack_min_probertt_hold) {
16214 /* Count as a probe rtt */
16215 if (rack->in_probe_rtt == 0) {
16216 rack->r_ctl.rc_lower_rtt_us_cts = cts;
16217 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
16218 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
16219 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
16221 rack_exit_probertt(rack, cts);
16226 if (rack_use_fsb && (rack->r_fsb_inited == 0) && (rack->r_state != TCPS_CLOSED))
16227 rack_init_fsb_block(tp, rack);
16230 * If we've recently taken a timeout, snd_max will be greater than
16231 * snd_nxt. There may be SACK information that allows us to avoid
16232 * resending already delivered data. Adjust snd_nxt accordingly.
16235 cts = tcp_get_usecs(&tv);
16236 ms_cts = tcp_tv_to_mssectick(&tv);
16239 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
16241 if (rack->r_ctl.rc_pace_max_segs == 0)
16242 pace_max_seg = rack->rc_user_set_max_segs * segsiz;
16244 pace_max_seg = rack->r_ctl.rc_pace_max_segs;
16245 sb_offset = tp->snd_max - tp->snd_una;
16246 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
16247 flags = tcp_outflags[tp->t_state];
16248 while (rack->rc_free_cnt < rack_free_cache) {
16249 rsm = rack_alloc(rack);
16251 if (inp->inp_hpts_calls)
16252 /* Retry in a ms */
16253 slot = (1 * HPTS_USEC_IN_MSEC);
16254 so = inp->inp_socket;
16256 goto just_return_nolock;
16258 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext);
16259 rack->rc_free_cnt++;
16262 if (inp->inp_hpts_calls)
16263 inp->inp_hpts_calls = 0;
16267 if (flags & TH_RST) {
16268 SOCKBUF_LOCK(&inp->inp_socket->so_snd);
16269 so = inp->inp_socket;
16273 if (rack->r_ctl.rc_resend) {
16274 /* Retransmit timer */
16275 rsm = rack->r_ctl.rc_resend;
16276 rack->r_ctl.rc_resend = NULL;
16277 rsm->r_flags &= ~RACK_TLP;
16278 len = rsm->r_end - rsm->r_start;
16281 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
16282 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
16283 __func__, __LINE__,
16284 rsm->r_start, tp->snd_una, tp, rack, rsm));
16285 sb_offset = rsm->r_start - tp->snd_una;
16288 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) {
16289 /* We have a retransmit that takes precedence */
16290 rsm->r_flags &= ~RACK_TLP;
16291 if ((!IN_FASTRECOVERY(tp->t_flags)) &&
16292 ((tp->t_flags & TF_WASFRECOVERY) == 0)) {
16293 /* Enter recovery if not induced by a time-out */
16294 rack->r_ctl.rc_rsm_start = rsm->r_start;
16295 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
16296 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
16297 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
16300 if (SEQ_LT(rsm->r_start, tp->snd_una)) {
16301 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
16302 tp, rack, rsm, rsm->r_start, tp->snd_una);
16305 len = rsm->r_end - rsm->r_start;
16306 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
16307 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
16308 __func__, __LINE__,
16309 rsm->r_start, tp->snd_una, tp, rack, rsm));
16310 sb_offset = rsm->r_start - tp->snd_una;
16316 KMOD_TCPSTAT_INC(tcps_sack_rexmits);
16317 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
16319 counter_u64_add(rack_rtm_prr_retran, 1);
16321 } else if (rack->r_ctl.rc_tlpsend) {
16322 /* Tail loss probe */
16328 * Check if we can do a TLP with a RACK'd packet:
16329 * this can happen if we are not doing the rack
16330 * cheat and we skipped to a TLP on an already-marked segment.
16333 rsm = rack->r_ctl.rc_tlpsend;
16334 rsm->r_flags |= RACK_TLP;
16336 rack->r_ctl.rc_tlpsend = NULL;
16338 tlen = rsm->r_end - rsm->r_start;
16341 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
16342 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
16343 __func__, __LINE__,
16344 rsm->r_start, tp->snd_una, tp, rack, rsm));
16345 sb_offset = rsm->r_start - tp->snd_una;
16346 cwin = min(tp->snd_wnd, tlen);
16349 if (rack->r_must_retran &&
16352 * Non-SACK and we had an RTO or MTU change; we
16353 * need to retransmit until we reach
16354 * the former snd_max (rack->r_ctl.rc_snd_max_at_rto).
16356 if (SEQ_GT(tp->snd_max, tp->snd_una)) {
16357 int sendwin, flight;
16359 sendwin = min(tp->snd_wnd, tp->snd_cwnd);
16360 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto);
16361 if (flight >= sendwin) {
16362 so = inp->inp_socket;
16364 goto just_return_nolock;
16366 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
16367 KASSERT(rsm != NULL, ("rsm is NULL rack:%p r_must_retran set", rack));
16370 rack->r_must_retran = 0;
16371 rack->r_ctl.rc_out_at_rto = 0;
16372 rack->r_must_retran = 0;
16373 so = inp->inp_socket;
16375 goto just_return_nolock;
16378 len = rsm->r_end - rsm->r_start;
16380 sb_offset = rsm->r_start - tp->snd_una;
16384 /* We must be done if there is nothing outstanding */
16385 rack->r_must_retran = 0;
16386 rack->r_ctl.rc_out_at_rto = 0;
16390 * Enforce a connection sendmap count limit if set
16391 * as long as we are not retransmitting.
16393 if ((rsm == NULL) &&
16394 (rack->do_detection == 0) &&
16395 (V_tcp_map_entries_limit > 0) &&
16396 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
16397 counter_u64_add(rack_to_alloc_limited, 1);
16398 if (!rack->alloc_limit_reported) {
16399 rack->alloc_limit_reported = 1;
16400 counter_u64_add(rack_alloc_limited_conns, 1);
16402 so = inp->inp_socket;
16404 goto just_return_nolock;
16406 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) {
16407 /* we are retransmitting the fin */
16411 * When retransmitting data do *not* include the
16412 * FIN. This could happen from a TLP probe.
16418 /* For debugging */
16419 rack->r_ctl.rc_rsm_at_retran = rsm;
16421 if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo &&
16422 ((rsm->r_flags & RACK_HAS_FIN) == 0)) {
16425 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len);
16429 so = inp->inp_socket;
16431 if (do_a_prefetch == 0) {
16432 kern_prefetch(sb, &do_a_prefetch);
16435 #ifdef NETFLIX_SHARED_CWND
16436 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) &&
16437 rack->rack_enable_scwnd) {
16438 /* We are doing cwnd sharing */
16439 if (rack->gp_ready &&
16440 (rack->rack_attempted_scwnd == 0) &&
16441 (rack->r_ctl.rc_scw == NULL) &&
16443 /* The pcbid is in, lets make an attempt */
16444 counter_u64_add(rack_try_scwnd, 1);
16445 rack->rack_attempted_scwnd = 1;
16446 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp,
16447 &rack->r_ctl.rc_scw_index,
16450 if (rack->r_ctl.rc_scw &&
16451 (rack->rack_scwnd_is_idle == 1) &&
16452 sbavail(&so->so_snd)) {
16453 /* we are no longer out of data */
16454 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
16455 rack->rack_scwnd_is_idle = 0;
16457 if (rack->r_ctl.rc_scw) {
16458 /* First lets update and get the cwnd */
16459 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw,
16460 rack->r_ctl.rc_scw_index,
16461 tp->snd_cwnd, tp->snd_wnd, segsiz);
16466 * Get standard flags, and add SYN or FIN if requested by 'hidden' state flags.
16469 if (tp->t_flags & TF_NEEDFIN)
16471 if (tp->t_flags & TF_NEEDSYN)
16473 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) {
16475 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
16477 kern_prefetch(end_rsm, &prefetch_rsm);
16482 * If snd_nxt == snd_max and we have transmitted a FIN, the
16483 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a
16484 * negative length. This can also occur when TCP opens up its
16485 * congestion window while receiving additional duplicate acks after
16486 * fast-retransmit because TCP will reset snd_nxt to snd_max after
16487 * the fast-retransmit.
16489 * In the normal retransmit-FIN-only case, however, snd_nxt will be
16490 * set to snd_una, the sb_offset will be 0, and the length may wind
16493 * If sack_rxmit is true we are retransmitting from the scoreboard
16494 * in which case len is already set.
16496 if ((sack_rxmit == 0) &&
16497 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) {
16500 avail = sbavail(sb);
16501 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail)
16502 sb_offset = tp->snd_nxt - tp->snd_una;
16505 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) {
16506 if (rack->r_ctl.rc_tlp_new_data) {
16507 /* TLP is forcing out new data */
16508 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) {
16509 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset);
16511 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) {
16512 if (tp->snd_wnd > sb_offset)
16513 len = tp->snd_wnd - sb_offset;
16517 len = rack->r_ctl.rc_tlp_new_data;
16519 rack->r_ctl.rc_tlp_new_data = 0;
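/*
 * e.g. if TLP wants 3000 new bytes but only 1448 remain past
 * sb_offset, len is clamped to 1448, or to the rwnd remainder
 * when the peer's window is the tighter limit.
 */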
16522 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset);
16524 if ((rack->r_ctl.crte == NULL) && IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) {
16526 * For prr=off, we need to send only 1 MSS
16527 * at a time. We do this because another sack could
16528 * be arriving that causes us to send retransmits and
16529 * we don't want to be on a long pace due to a larger send
16530 * that keeps us from sending out the retransmit.
16535 uint32_t outstanding;
16537 * We are inside of a Fast recovery episode, this
16538 * is caused by a SACK or 3 dup acks. At this point
16539 * we have sent all the retransmissions and we rely
16540 * on PRR to dictate what we will send in the form of
16544 outstanding = tp->snd_max - tp->snd_una;
16545 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) {
16546 if (tp->snd_wnd > outstanding) {
16547 len = tp->snd_wnd - outstanding;
16548 /* Check to see if we have the data */
16549 if ((sb_offset + len) > avail) {
16550 /* It does not all fit */
16551 if (avail > sb_offset)
16552 len = avail - sb_offset;
16559 } else if (avail > sb_offset) {
16560 len = avail - sb_offset;
16565 if (len > rack->r_ctl.rc_prr_sndcnt) {
16566 len = rack->r_ctl.rc_prr_sndcnt;
16570 counter_u64_add(rack_rtm_prr_newdata, 1);
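/*
 * PRR meters new data against rc_prr_sndcnt: e.g. 2896 bytes of
 * PRR credit against 5000 sendable cuts len to 2896, and the
 * checks below usually trim it further to a single segsiz.
 */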
16573 if (len > segsiz) {
16575 * We should never send more than a MSS when
16576 * retransmitting or sending new data in prr
16577 * mode unless the override flag is on. Most
16578 * likely the PRR algorithm is not going to
16579 * let us send a lot as well :-)
16581 if (rack->r_ctl.rc_prr_sendalot == 0) {
16584 } else if (len < segsiz) {
16586 * Do we send any? The idea here is if the
16587 * send empties the socket buffer we want to
16588 * do it. However if not then lets just wait
16589 * for our prr_sndcnt to get bigger.
16593 leftinsb = sbavail(sb) - sb_offset;
16594 if (leftinsb > len) {
16595 /* This send does not empty the sb */
16600 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) {
16602 * If you have not established
16603 * and are not doing FAST OPEN, no data please.
16606 if ((sack_rxmit == 0) &&
16607 (!IS_FASTOPEN(tp->t_flags))){
16612 if (prefetch_so_done == 0) {
16613 kern_prefetch(so, &prefetch_so_done);
16614 prefetch_so_done = 1;
16617 * Lop off SYN bit if it has already been sent. However, if this is
16618 * SYN-SENT state and if segment contains data and if we don't know
16619 * that foreign host supports TAO, suppress sending segment.
16621 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) &&
16622 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) {
16624 * When sending additional segments following a TFO SYN|ACK,
16625 * do not include the SYN bit.
16627 if (IS_FASTOPEN(tp->t_flags) &&
16628 (tp->t_state == TCPS_SYN_RECEIVED))
16632 * Be careful not to send data and/or FIN on SYN segments. This
16633 * measure is needed to prevent interoperability problems with not
16634 * fully conformant TCP implementations.
16636 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
16641 * On TFO sockets, ensure no data is sent in the following cases:
16643 * - When retransmitting SYN|ACK on a passively-created socket
16645 * - When retransmitting SYN on an actively created socket
16647 * - When sending a zero-length cookie (cookie request) on an
16648 * actively created socket
16650 * - When the socket is in the CLOSED state (RST is being sent)
16652 if (IS_FASTOPEN(tp->t_flags) &&
16653 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
16654 ((tp->t_state == TCPS_SYN_SENT) &&
16655 (tp->t_tfo_client_cookie_len == 0)) ||
16656 (flags & TH_RST))) {
16660 /* Without fast-open there should never be data sent on a SYN */
16661 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) {
16662 tp->snd_nxt = tp->iss;
16665 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) {
16666 /* We only send 1 MSS if we have a DSACK block */
16667 add_flag |= RACK_SENT_W_DSACK;
16673 * If FIN has been sent but not acked, but we haven't been
16674 * called to retransmit, len will be < 0. Otherwise, window
16675 * shrank after we sent into it. If window shrank to 0,
16676 * cancel pending retransmit, pull snd_nxt back to (closed)
16677 * window, and set the persist timer if it isn't already
16678 * going. If the window didn't close completely, just wait for an ACK.
16681 * We also do a general check here to ensure that we will
16682 * set the persist timer when we have data to send, but a
16683 * 0-byte window. This makes sure the persist timer is set
16684 * even if the packet hits one of the "goto send" lines below.
16688 if ((tp->snd_wnd == 0) &&
16689 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
16690 (tp->snd_una == tp->snd_max) &&
16691 (sb_offset < (int)sbavail(sb))) {
16692 rack_enter_persist(tp, rack, cts);
16694 } else if ((rsm == NULL) &&
16695 (doing_tlp == 0) &&
16696 (len < pace_max_seg)) {
16698 * We are not sending a maximum sized segment for
16699 * some reason. Should we not send anything (think
16700 * sws or persists)?
16702 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
16703 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
16705 (len < (int)(sbavail(sb) - sb_offset))) {
16707 * Here the rwnd is less than
16708 * the minimum pacing size, this is not a retransmit,
16709 * we are established and
16710 * the send is not the last in the socket buffer,
16711 * so we send nothing, and we may enter persists
16712 * if nothing is outstanding.
16715 if (tp->snd_max == tp->snd_una) {
16717 * Nothing is outstanding; we can
16718 * go into persists.
16720 rack_enter_persist(tp, rack, cts);
16722 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) &&
16723 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
16724 (len < (int)(sbavail(sb) - sb_offset)) &&
16727 * Here we are not retransmitting, and
16728 * the cwnd is not so small that we could
16729 * not send at least a min size (rxt timer
16730 * not having gone off), we have 2 segments or
16731 * more already in flight, it's not the tail end
16732 * of the socket buffer and the cwnd is blocking
16733 * us from sending out a minimum pacing segment size.
16734 * Let's not send anything.
16737 } else if (((tp->snd_wnd - ctf_outstanding(tp)) <
16738 min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
16739 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
16740 (len < (int)(sbavail(sb) - sb_offset)) &&
16741 (TCPS_HAVEESTABLISHED(tp->t_state))) {
16743 * Here we have a send window but we have
16744 * filled it up and we can't send another pacing segment.
16745 * We also have in flight more than 2 segments
16746 * and we are not completing the sb i.e. we allow
16747 * the last bytes of the sb to go out even if
16748 * it's not a full pacing segment.
16751 } else if ((rack->r_ctl.crte != NULL) &&
16752 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) &&
16753 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) &&
16754 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) &&
16755 (len < (int)(sbavail(sb) - sb_offset))) {
16757 * Here we are doing hardware pacing, this is not a TLP,
16758 * we are not sending a pace max segment size, there is rwnd
16759 * room to send at least N pace_max_seg, the cwnd is greater
16760 * than or equal to a full pacing segments plus 4 mss and we have 2 or
16761 * more segments in flight and its not the tail of the socket buffer.
16763 * We don't want to send instead we need to get more ack's in to
16764 * allow us to send a full pacing segment. Normally, if we are pacing
16765 * about the right speed, we should have finished our pacing
16766 * send as most of the acks have come back if we are at the
16767 * right rate. This is a bit fuzzy since return path delay
16768 * can delay the acks, which is why we want to make sure we
16769 * have cwnd space to have a bit more than a max pace segments in flight.
16771 * If we have not gotten our acks back, we are pacing at too high a
16772 * rate; delaying will not hurt and will bring our GP estimate down by
16773 * injecting the delay. If we don't do this we will send
16774 * 2 MSS out in response to the acks being clocked in which
16775 * defeats the point of hw-pacing (i.e. to help us get
16776 * larger TSOs out).
16783 /* len will be >= 0 after this point. */
16784 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
16785 rack_sndbuf_autoscale(rack);
16787 * Decide if we can use TCP Segmentation Offloading (if supported by
16790 * TSO may only be used if we are in a pure bulk sending state. The
16791 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
16792 * options prevent using TSO. With TSO the TCP header is the same
16793 * (except for the sequence number) for all generated packets. This
16794 * makes it impossible to transmit any options which vary per
16795 * generated segment or packet.
16797 * IPv4 handling has a clear separation of ip options and ip header
16798 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
16799 * the right thing below to provide length of just ip options and thus
16800 * checking for ipoptlen is enough to decide if ip options are present.
16803 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
16805 * Pre-calculate here as we save another lookup into the darknesses
16806 * of IPsec that way and can actually decide if TSO is ok.
16809 if (isipv6 && IPSEC_ENABLED(ipv6))
16810 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb);
16816 if (IPSEC_ENABLED(ipv4))
16817 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb);
16821 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
16822 ipoptlen += ipsec_optlen;
16824 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz &&
16825 (tp->t_port == 0) &&
16826 ((tp->t_flags & TF_SIGNATURE) == 0) &&
16827 tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
16831 uint32_t outstanding;
16833 outstanding = tp->snd_max - tp->snd_una;
16834 if (tp->t_flags & TF_SENTFIN) {
16836 * If we sent a fin, snd_max is 1 higher than
16842 if ((rsm->r_flags & RACK_HAS_FIN) == 0)
16845 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
16850 recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
16851 (long)TCP_MAXWIN << tp->rcv_scale);
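/*
 * recwin is the window we will advertise: free receive-buffer
 * space clipped to TCP_MAXWIN << rcv_scale (about 1 GB at the
 * maximum scale of 14).
 */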
16854 * Sender silly window avoidance. We transmit under the following
16855 * conditions when len is non-zero:
16857 * - We have a full segment (or more with TSO) - This is the last
16858 * buffer in a write()/send() and we are either idle or running
16859 * NODELAY - we've timed out (e.g. persist timer) - we have more
16860 * than 1/2 the maximum send window's worth of data (receiver may be
16861 * limiting the window size) - we need to retransmit
16864 if (len >= segsiz) {
16868 * NOTE! on localhost connections an 'ack' from the remote
16869 * end may occur synchronously with the output and cause us
16870 * to flush a buffer queued with moretocome. XXX
16873 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */
16874 (idle || (tp->t_flags & TF_NODELAY)) &&
16875 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
16876 (tp->t_flags & TF_NOPUSH) == 0) {
16880 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */
16884 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
16888 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */
16896 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) &&
16897 (ctf_outstanding(tp) < (segsiz * 2))) {
16899 * We have less than two MSS outstanding (delayed ack)
16900 * and our rwnd will not let us send a full sized
16901 * MSS. Lets go ahead and let this small segment
16902 * out because we want to try to have at least two
16903 * packets inflight to not be caught by delayed ack.
16910 * Sending of standalone window updates.
16912 * Window updates are important when we close our window due to a
16913 * full socket buffer and are opening it again after the application
16914 * reads data from it. Once the window has opened again and the
16915 * remote end starts to send again the ACK clock takes over and
16916 * provides the most current window information.
16918 * We must avoid the silly window syndrome whereby every read from
16919 * the receive buffer, no matter how small, causes a window update
16920 * to be sent. We also should avoid sending a flurry of window
16921 * updates when the socket buffer had queued a lot of data and the
16922 * application is doing small reads.
16924 * Prevent a flurry of pointless window updates by only sending an
16925 * update when we can increase the advertised window by more than
16926 * 1/4th of the socket buffer capacity. When the buffer is getting
16927 * full or is very small be more aggressive and send an update
16928 * whenever we can increase by two mss sized segments. In all other
16929 * situations the ACK's to new incoming data will carry further
16930 * window increases.
16932 * Don't send an independent window update if a delayed ACK is
16933 * pending (it will get piggy-backed on it) or the remote side
16934 * already has done a half-close and won't send more data. Skip
16935 * this if the connection is in T/TCP half-open state.
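/*
 * For instance (illustrative values): with sb_hiwat = 65536 and
 * segsiz = 1448, the test below requires adv >= 2896 (two segments)
 * plus either adv >= 16384 (sb_hiwat / 4) or recwin <= 8192
 * (sb_hiwat / 8); a socket buffer of 8 * segsiz or less always
 * falls back to the two-segment rule.
 */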
16937 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
16938 !(tp->t_flags & TF_DELACK) &&
16939 !TCPS_HAVERCVDFIN(tp->t_state)) {
16941 * "adv" is the amount we could increase the window, taking
16942 * into account that we are limited by TCP_MAXWIN << tp->rcv_scale.
16949 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
16950 oldwin = (tp->rcv_adv - tp->rcv_nxt);
16954 /* We can't increase the window */
16961 * If the new window size ends up being the same as or less
16962 * than the old size when it is scaled, then don't force a window update.
16965 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
16968 if (adv >= (int32_t)(2 * segsiz) &&
16969 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
16970 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) ||
16971 so->so_rcv.sb_hiwat <= 8 * segsiz)) {
16975 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) {
16983 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW
16984 * is also a catch-all for the retransmit timer timeout case.
16986 if (tp->t_flags & TF_ACKNOW) {
16990 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) {
16995 * If our state indicates that FIN should be sent and we have not
16996 * yet done so, then we need to send.
16998 if ((flags & TH_FIN) &&
16999 (tp->snd_nxt == tp->snd_una)) {
17004 * No reason to send a segment, just return.
17007 SOCKBUF_UNLOCK(sb);
17008 just_return_nolock:
17010 int app_limited = CTF_JR_SENT_DATA;
17012 if (tot_len_this_send > 0) {
17013 /* Make sure snd_nxt is up to max */
17014 rack->r_ctl.fsb.recwin = recwin;
17015 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz);
17016 if ((error == 0) &&
17018 ((flags & (TH_SYN|TH_FIN)) == 0) &&
17020 (tp->snd_nxt == tp->snd_max) &&
17021 (tp->rcv_numsacks == 0) &&
17022 rack->r_fsb_inited &&
17023 TCPS_HAVEESTABLISHED(tp->t_state) &&
17024 (rack->r_must_retran == 0) &&
17025 ((tp->t_flags & TF_NEEDFIN) == 0) &&
17026 (len > 0) && (orig_len > 0) &&
17027 (orig_len > len) &&
17028 ((orig_len - len) >= segsiz) &&
17030 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
17031 /* We can send at least one more MSS using our fsb */
17033 rack->r_fast_output = 1;
17034 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
17035 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
17036 rack->r_ctl.fsb.tcp_flags = flags;
17037 rack->r_ctl.fsb.left_to_send = orig_len - len;
17039 rack->r_ctl.fsb.hw_tls = 1;
17041 rack->r_ctl.fsb.hw_tls = 0;
17042 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
17043 ("rack:%p left_to_send:%u sbavail:%u out:%u",
17044 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
17045 (tp->snd_max - tp->snd_una)));
17046 if (rack->r_ctl.fsb.left_to_send < segsiz)
17047 rack->r_fast_output = 0;
17049 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
17050 rack->r_ctl.fsb.rfo_apply_push = 1;
17052 rack->r_ctl.fsb.rfo_apply_push = 0;
17055 rack->r_fast_output = 0;
17058 rack_log_fsb(rack, tp, so, flags,
17059 ipoptlen, orig_len, len, 0,
17060 1, optlen, __LINE__, 1);
17061 if (SEQ_GT(tp->snd_max, tp->snd_nxt))
17062 tp->snd_nxt = tp->snd_max;
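/*
 * A note on what arming r_fast_output above buys us: the fsb caches
 * the mbuf position, header flags and bytes left, so a follow-on
 * send can be built without re-walking the socket buffer or
 * recomputing options. The guards above restrict this to the plain
 * case: established state, timestamps-only options, no SACKs, and
 * no retransmission pending.
 */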
17064 int end_window = 0;
17065 uint32_t seq = tp->gput_ack;
17067 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
17070 * Mark the last segment sent as just-returned (hinting
17071 * that delayed ack may play a role in any rtt measurement).
17073 rsm->r_just_ret = 1;
17075 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1);
17076 rack->r_ctl.rc_agg_delayed = 0;
17079 rack->r_ctl.rc_agg_early = 0;
17080 if ((ctf_outstanding(tp) +
17081 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)),
17082 minseg)) >= tp->snd_wnd) {
17083 /* We are limited by the rwnd */
17084 app_limited = CTF_JR_RWND_LIMITED;
17085 if (IN_FASTRECOVERY(tp->t_flags))
17086 rack->r_ctl.rc_prr_sndcnt = 0;
17087 } else if (ctf_outstanding(tp) >= sbavail(sb)) {
17088 /* We are limited by what's available -- app limited */
17089 app_limited = CTF_JR_APP_LIMITED;
17090 if (IN_FASTRECOVERY(tp->t_flags))
17091 rack->r_ctl.rc_prr_sndcnt = 0;
17092 } else if ((idle == 0) &&
17093 ((tp->t_flags & TF_NODELAY) == 0) &&
17094 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
17097 * NODELAY is not on and the
17098 * user is sending less than 1 MSS. This
17099 * triggers SWS avoidance so we
17100 * don't send. Another app-limited case.
17102 app_limited = CTF_JR_APP_LIMITED;
17103 } else if (tp->t_flags & TF_NOPUSH) {
17105 * The user has requested no push of
17106 * the last segment and we are
17107 * at the last segment. Another app-limited case.
17110 app_limited = CTF_JR_APP_LIMITED;
17111 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) {
17113 app_limited = CTF_JR_CWND_LIMITED;
17114 } else if (IN_FASTRECOVERY(tp->t_flags) &&
17115 (rack->rack_no_prr == 0) &&
17116 (rack->r_ctl.rc_prr_sndcnt < segsiz)) {
17117 app_limited = CTF_JR_PRR;
17119 /* Now, why are we not sending here? */
17122 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
17125 app_limited = CTF_JR_ASSESSING;
17128 * App limited in some fashion; for our pacing GP
17129 * measurements we don't want any gap (even cwnd).
17130 * Close down the measurement window.
17132 if (rack_cwnd_block_ends_measure &&
17133 ((app_limited == CTF_JR_CWND_LIMITED) ||
17134 (app_limited == CTF_JR_PRR))) {
17136 * The reason we are not sending is
17137 * the cwnd (or prr). We have been configured
17138 * to end the measurement window in this case.
17142 } else if (rack_rwnd_block_ends_measure &&
17143 (app_limited == CTF_JR_RWND_LIMITED)) {
17145 * We are rwnd limited and have been
17146 * configured to end the measurement
17147 * window in this case.
17150 } else if (app_limited == CTF_JR_APP_LIMITED) {
17152 * A true application limited period; we have run out of data.
17156 } else if (app_limited == CTF_JR_ASSESSING) {
17158 * In the assessing case we hit the end of
17159 * the if/else chain and had no known reason.
17160 * This will panic us under invariants.
17162 * If we see this in the logs we need to
17163 * investigate which reason we missed.
17170 if ((tp->t_flags & TF_GPUTINPROG) &&
17171 SEQ_GT(tp->gput_ack, tp->snd_max)) {
17172 /* Mark the last packet as app limited */
17173 tp->gput_ack = tp->snd_max;
17176 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
17177 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
17178 if (rack->r_ctl.rc_app_limited_cnt == 0)
17179 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
17182 * Go out to the end app limited and mark
17183 * this new one as next and move the end_appl up to this guy.
17186 if (rack->r_ctl.rc_end_appl)
17187 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
17188 rack->r_ctl.rc_end_appl = rsm;
17190 rsm->r_flags |= RACK_APP_LIMITED;
17191 rack->r_ctl.rc_app_limited_cnt++;
17194 rack_log_pacing_delay_calc(rack,
17195 rack->r_ctl.rc_app_limited_cnt, seq,
17196 tp->gput_ack, 0, 0, 4, __LINE__, NULL);
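/*
 * The marking above strings app-limited sendmaps into a chain
 * (rc_first_appl .. rc_end_appl, linked through r_nseq_appl) so
 * that the goodput measurement code can recognize stretches where
 * the application simply had nothing to send.
 */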
17200 /* set the rack tcb into the slot N */
17201 counter_u64_add(rack_paced_segments, 1);
17202 } else if (tot_len_this_send) {
17203 counter_u64_add(rack_unpaced_segments, 1);
17205 /* Check if we need to go into persists or not */
17206 if ((tp->snd_max == tp->snd_una) &&
17207 TCPS_HAVEESTABLISHED(tp->t_state) &&
17209 (sbavail(sb) > tp->snd_wnd) &&
17210 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) {
17211 /* Yes lets make sure to move to persist before timer-start */
17212 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
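/*
 * Example of the persist gate above (hypothetical numbers): nothing
 * outstanding, 20KB still queued, and the peer advertising a 500
 * byte window -- below min(rc_high_rwnd / 2, minseg) -- so we enter
 * persist now instead of arming a timer for a window we cannot use.
 */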
17214 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack);
17215 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use);
17217 #ifdef NETFLIX_SHARED_CWND
17218 if ((sbavail(sb) == 0) &&
17219 rack->r_ctl.rc_scw) {
17220 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
17221 rack->rack_scwnd_is_idle = 1;
17224 #ifdef TCP_ACCOUNTING
17225 if (tot_len_this_send > 0) {
17226 crtsc = get_cyclecount();
17227 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17228 tp->tcp_cnt_counters[SND_OUT_DATA]++;
17230 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1);
17231 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17232 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
17234 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val));
17235 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17236 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz);
17238 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) / segsiz));
17240 crtsc = get_cyclecount();
17241 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17242 tp->tcp_cnt_counters[SND_LIMITED]++;
17244 counter_u64_add(tcp_cnt_counters[SND_LIMITED], 1);
17245 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17246 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val);
17248 counter_u64_add(tcp_proc_time[SND_LIMITED], (crtsc - ts_val));
17255 if (rsm || sack_rxmit)
17256 counter_u64_add(rack_nfto_resend, 1);
17258 counter_u64_add(rack_non_fto_send, 1);
17259 if ((flags & TH_FIN) &&
17262 * We do not transmit a FIN
17263 * with data outstanding. We
17264 * need to make it so all data is acked first.
17269 /* Enforce stack imposed max seg size if we have one */
17270 if (rack->r_ctl.rc_pace_max_segs &&
17271 (len > rack->r_ctl.rc_pace_max_segs)) {
17273 len = rack->r_ctl.rc_pace_max_segs;
17275 SOCKBUF_LOCK_ASSERT(sb);
17278 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
17280 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
17283 * Before ESTABLISHED, force sending of initial options unless TCP
17284 * set not to do any options. NOTE: we assume that the IP/TCP header
17285 * plus TCP options always fit in a single mbuf, leaving room for a
17286 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr)
17287 * + optlen <= MCLBYTES
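/*
 * With typical values that assumption is comfortable: a 16 byte
 * max_linkhdr, a 40 byte tcpiphdr and the 40 byte TCP option
 * maximum total 96 bytes, well under a 2KB (MCLBYTES) cluster.
 */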
17292 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
17295 hdrlen = sizeof(struct tcpiphdr);
17298 * Compute options for segment. We only have to care about SYN and
17299 * established connection segments. Options for SYN-ACK segments
17300 * are handled in TCP syncache.
17303 if ((tp->t_flags & TF_NOOPT) == 0) {
17304 /* Maximum segment size. */
17305 if (flags & TH_SYN) {
17306 tp->snd_nxt = tp->iss;
17307 to.to_mss = tcp_mssopt(&inp->inp_inc);
17309 to.to_mss -= V_tcp_udp_tunneling_overhead;
17310 to.to_flags |= TOF_MSS;
17313 * On SYN or SYN|ACK transmits on TFO connections,
17314 * only include the TFO option if it is not a
17315 * retransmit, as the presence of the TFO option may
17316 * have caused the original SYN or SYN|ACK to have
17317 * been dropped by a middlebox.
17319 if (IS_FASTOPEN(tp->t_flags) &&
17320 (tp->t_rxtshift == 0)) {
17321 if (tp->t_state == TCPS_SYN_RECEIVED) {
17322 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
17324 to.to_tfo_cookie = (u_int8_t *)&tp->t_tfo_cookie.server;
17325 to.to_flags |= TOF_FASTOPEN;
17327 } else if (tp->t_state == TCPS_SYN_SENT) {
17329 to.to_tfo_len = tp->t_tfo_client_cookie_len;
17331 to.to_tfo_cookie = tp->t_tfo_cookie.client;
17332 to.to_flags |= TOF_FASTOPEN;
17335 * If we wind up having more data to
17336 * send with the SYN than can fit in
17337 * one segment, don't send any more
17338 * until the SYN|ACK comes back from the peer.
17345 /* Window scaling. */
17346 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
17347 to.to_wscale = tp->request_r_scale;
17348 to.to_flags |= TOF_SCALE;
17351 if ((tp->t_flags & TF_RCVD_TSTMP) ||
17352 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
17353 to.to_tsval = ms_cts + tp->ts_offset;
17354 to.to_tsecr = tp->ts_recent;
17355 to.to_flags |= TOF_TS;
17357 /* Set receive buffer autosizing timestamp. */
17358 if (tp->rfbuf_ts == 0 &&
17359 (so->so_rcv.sb_flags & SB_AUTOSIZE))
17360 tp->rfbuf_ts = tcp_ts_getticks();
17361 /* Selective ACK's. */
17362 if (tp->t_flags & TF_SACK_PERMIT) {
17363 if (flags & TH_SYN)
17364 to.to_flags |= TOF_SACKPERM;
17365 else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
17366 tp->rcv_numsacks > 0) {
17367 to.to_flags |= TOF_SACK;
17368 to.to_nsacks = tp->rcv_numsacks;
17369 to.to_sacks = (u_char *)tp->sackblks;
17372 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
17373 /* TCP-MD5 (RFC2385). */
17374 if (tp->t_flags & TF_SIGNATURE)
17375 to.to_flags |= TOF_SIGNATURE;
17376 #endif /* TCP_SIGNATURE */
17378 /* Processing the options. */
17379 hdrlen += optlen = tcp_addoptions(&to, opt);
17381 * If we wanted a TFO option to be added, but it was unable
17382 * to fit, ensure no data is sent.
17384 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie &&
17385 !(to.to_flags & TOF_FASTOPEN))
17389 if (V_tcp_udp_tunneling_port == 0) {
17390 /* The port was removed?? */
17391 SOCKBUF_UNLOCK(&so->so_snd);
17392 #ifdef TCP_ACCOUNTING
17393 crtsc = get_cyclecount();
17394 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17395 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
17397 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1);
17398 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17399 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
17401 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val));
17404 return (EHOSTUNREACH);
17406 hdrlen += sizeof(struct udphdr);
17410 ipoptlen = ip6_optlen(tp->t_inpcb);
17413 if (tp->t_inpcb->inp_options)
17414 ipoptlen = tp->t_inpcb->inp_options->m_len -
17415 offsetof(struct ipoption, ipopt_list);
17418 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
17419 ipoptlen += ipsec_optlen;
17423 * Adjust data length if insertion of options will bump the packet
17424 * length beyond the t_maxseg length. Clear the FIN bit because we
17425 * cut off the tail of the segment.
17427 if (len + optlen + ipoptlen > tp->t_maxseg) {
17429 uint32_t if_hw_tsomax;
17433 /* extract TSO information */
17434 if_hw_tsomax = tp->t_tsomax;
17435 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
17436 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
17437 KASSERT(ipoptlen == 0,
17438 ("%s: TSO can't do IP options", __func__));
17441 * Check if we should limit by maximum payload length:
17444 if (if_hw_tsomax != 0) {
17445 /* compute maximum TSO length */
17446 max_len = (if_hw_tsomax - hdrlen - max_linkhdr);
17448 if (max_len <= 0) {
17450 } else if (len > max_len) {
17457 * Prevent the last segment from being fractional
17458 * unless the send sockbuf can be emptied:
17460 max_len = (tp->t_maxseg - optlen);
17461 if ((sb_offset + len) < sbavail(sb)) {
17462 moff = len % (u_int)max_len;
17469 * In case there are too many small fragments, don't use TSO:
17472 if (len <= segsiz) {
17477 * Send the FIN in a separate segment after the bulk
17478 * sending is done. We don't trust the TSO
17479 * implementations to clear the FIN flag on all but
17480 * the last segment.
17482 if (tp->t_flags & TF_NEEDFIN) {
17487 if (optlen + ipoptlen >= tp->t_maxseg) {
17489 * Since we don't have enough space to put
17490 * the IP header chain and the TCP header in
17491 * one packet as required by RFC 7112, don't
17492 * send it. Also ensure that at least one
17493 * byte of the payload can be put into the TCP segment.
17496 SOCKBUF_UNLOCK(&so->so_snd);
17501 len = tp->t_maxseg - optlen - ipoptlen;
17508 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
17509 ("%s: len > IP_MAXPACKET", __func__));
17512 if (max_linkhdr + hdrlen > MCLBYTES)
17514 if (max_linkhdr + hdrlen > MHLEN)
17516 panic("tcphdr too big");
17520 * This KASSERT is here to catch edge cases at a well defined place.
17521 * Before, those had triggered (random) panic conditions further down.
17524 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
17526 (flags & TH_FIN) &&
17529 * We have outstanding data; don't send a FIN by itself!
17534 * Grab a header mbuf, attaching a copy of data to be transmitted,
17535 * and initialize the header from the template for sends on this connection.
17538 hw_tls = (sb->sb_flags & SB_TLS_IFNET) != 0;
17543 if (rack->r_ctl.rc_pace_max_segs)
17544 max_val = rack->r_ctl.rc_pace_max_segs;
17545 else if (rack->rc_user_set_max_segs)
17546 max_val = rack->rc_user_set_max_segs * segsiz;
17550 * We allow a limit on sending with hptsi.
17552 if (len > max_val) {
17557 if (MHLEN < hdrlen + max_linkhdr)
17558 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
17561 m = m_gethdr(M_NOWAIT, MT_DATA);
17564 SOCKBUF_UNLOCK(sb);
17569 m->m_data += max_linkhdr;
17573 * Start the m_copy functions from the closest mbuf to the
17574 * sb_offset in the socket buffer chain.
17576 mb = sbsndptr_noadv(sb, sb_offset, &moff);
17579 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) {
17580 m_copydata(mb, moff, (int)len,
17581 mtod(m, caddr_t)+hdrlen);
17582 if (SEQ_LT(tp->snd_nxt, tp->snd_max))
17583 sbsndptr_adv(sb, mb, len);
17586 struct sockbuf *msb;
17588 if (SEQ_LT(tp->snd_nxt, tp->snd_max))
17592 m->m_next = tcp_m_copym(mb, moff, &len,
17594 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb,
17595 ((rsm == NULL) ? hw_tls : 0)
17596 #ifdef NETFLIX_COPY_ARGS
17600 if (len <= (tp->t_maxseg - optlen)) {
17602 * Must have run out of mbufs for the copy;
17603 * shorten it to no longer need TSO. Let's
17604 * not set sendalot since we are low on mbufs.
17609 if (m->m_next == NULL) {
17610 SOCKBUF_UNLOCK(sb);
17617 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
17618 if (rsm && (rsm->r_flags & RACK_TLP)) {
17620 * TLP should not count in the retran count, but in its own bin.
17623 counter_u64_add(rack_tlp_retran, 1);
17624 counter_u64_add(rack_tlp_retran_bytes, len);
17626 tp->t_sndrexmitpack++;
17627 KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
17628 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
17631 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
17635 KMOD_TCPSTAT_INC(tcps_sndpack);
17636 KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
17638 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
17643 * If we're sending everything we've got, set PUSH. (This
17644 * will keep happy those implementations which only give
17645 * data to the user when a buffer fills or a PUSH comes in.)
17647 if (sb_offset + len == sbused(sb) &&
17649 !(flags & TH_SYN)) {
17651 add_flag |= RACK_HAD_PUSH;
17654 SOCKBUF_UNLOCK(sb);
17656 SOCKBUF_UNLOCK(sb);
17657 if (tp->t_flags & TF_ACKNOW)
17658 KMOD_TCPSTAT_INC(tcps_sndacks);
17659 else if (flags & (TH_SYN | TH_FIN | TH_RST))
17660 KMOD_TCPSTAT_INC(tcps_sndctrl);
17662 KMOD_TCPSTAT_INC(tcps_sndwinup);
17664 m = m_gethdr(M_NOWAIT, MT_DATA);
17671 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
17673 M_ALIGN(m, hdrlen);
17676 m->m_data += max_linkhdr;
17679 SOCKBUF_UNLOCK_ASSERT(sb);
17680 m->m_pkthdr.rcvif = (struct ifnet *)0;
17682 mac_inpcb_create_mbuf(inp, m);
17684 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
17687 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
17690 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
17691 th = rack->r_ctl.fsb.th;
17692 udp = rack->r_ctl.fsb.udp;
17696 ulen = hdrlen + len - sizeof(struct ip6_hdr);
17699 ulen = hdrlen + len - sizeof(struct ip);
17700 udp->uh_ulen = htons(ulen);
17705 ip6 = mtod(m, struct ip6_hdr *);
17707 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
17708 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
17709 udp->uh_dport = tp->t_port;
17710 ulen = hdrlen + len - sizeof(struct ip6_hdr);
17711 udp->uh_ulen = htons(ulen);
17712 th = (struct tcphdr *)(udp + 1);
17714 th = (struct tcphdr *)(ip6 + 1);
17715 tcpip_fillheaders(inp, tp->t_port, ip6, th);
17719 ip = mtod(m, struct ip *);
17721 ipov = (struct ipovly *)ip;
17724 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
17725 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
17726 udp->uh_dport = tp->t_port;
17727 ulen = hdrlen + len - sizeof(struct ip);
17728 udp->uh_ulen = htons(ulen);
17729 th = (struct tcphdr *)(udp + 1);
17731 th = (struct tcphdr *)(ip + 1);
17732 tcpip_fillheaders(inp, tp->t_port, ip, th);
17736 * Fill in fields, remembering maximum advertised window for use in
17737 * delaying messages about window sizes. If resending a FIN, be sure
17738 * not to use a new sequence number.
17740 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
17741 tp->snd_nxt == tp->snd_max)
17744 * If we are starting a connection, send ECN setup SYN packet. If we
17745 * are on a retransmit, we may resend those bits a number of times as per RFC 3168.
17748 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) {
17749 if (tp->t_rxtshift >= 1) {
17750 if (tp->t_rxtshift <= V_tcp_ecn_maxretries)
17751 flags |= TH_ECE | TH_CWR;
17753 flags |= TH_ECE | TH_CWR;
17755 /* Handle parallel SYN for ECN */
17756 if ((tp->t_state == TCPS_SYN_RECEIVED) &&
17757 (tp->t_flags2 & TF2_ECN_SND_ECE)) {
17759 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
17761 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
17762 (tp->t_flags2 & TF2_ECN_PERMIT)) {
17764 * If the peer has ECN, mark data packets with ECN capable
17765 * transmission (ECT). Ignore pure ack packets and retransmissions.
17768 if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) &&
17769 (sack_rxmit == 0)) {
17772 ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
17775 ip->ip_tos |= IPTOS_ECN_ECT0;
17776 KMOD_TCPSTAT_INC(tcps_ecn_ect0);
17778 * Reply with proper ECN notifications.
17779 * Only set CWR on new data segments.
17781 if (tp->t_flags2 & TF2_ECN_SND_CWR) {
17783 tp->t_flags2 &= ~TF2_ECN_SND_CWR;
17786 if (tp->t_flags2 & TF2_ECN_SND_ECE)
17790 * If we are doing retransmissions, then snd_nxt will not reflect
17791 * the first unsent octet. For ACK only packets, we do not want the
17792 * sequence number of the retransmitted packet, we want the sequence
17793 * number of the next unsent octet. So, if there is no data (and no
17794 * SYN or FIN), use snd_max instead of snd_nxt when filling in
17795 * ti_seq. But if we are in persist state, snd_max might reflect
17796 * one byte beyond the right edge of the window, so use snd_nxt in
17797 * that case, since we know we aren't doing a retransmission.
17798 * (retransmit and persist are mutually exclusive...)
17800 if (sack_rxmit == 0) {
17801 if (len || (flags & (TH_SYN | TH_FIN))) {
17802 th->th_seq = htonl(tp->snd_nxt);
17803 rack_seq = tp->snd_nxt;
17805 th->th_seq = htonl(tp->snd_max);
17806 rack_seq = tp->snd_max;
17809 th->th_seq = htonl(rsm->r_start);
17810 rack_seq = rsm->r_start;
17812 th->th_ack = htonl(tp->rcv_nxt);
17813 th->th_flags = flags;
17815 * Calculate receive window. Don't shrink the window, but avoid silly window syndrome.
17817 * If a RST segment is sent, advertise a window of zero.
17819 if (flags & TH_RST) {
17822 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
17823 recwin < (long)segsiz) {
17826 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
17827 recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
17828 recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
17832 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or
17833 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is
17834 * handled in syncache.
17836 if (flags & TH_SYN)
17837 th->th_win = htons((u_short)
17838 (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
17840 /* Avoid shrinking window with window scaling. */
17841 recwin = roundup2(recwin, 1 << tp->rcv_scale);
17842 th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
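/*
 * Rounding example (hypothetical): with rcv_scale = 7, roundup2()
 * forces recwin to a multiple of 128, so recwin = 131000 becomes
 * 131072 and is advertised as 131072 >> 7 = 1024; without the
 * roundup, truncation in the shift could shrink the window we
 * advertise.
 */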
17845 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0
17846 * window. This may cause the remote transmitter to stall. This
17847 * flag tells soreceive() to disable delayed acknowledgements when
17848 * draining the buffer. This can occur if the receiver is
17849 * attempting to read more data than can be buffered prior to
17850 * transmitting on the connection.
17852 if (th->th_win == 0) {
17853 tp->t_sndzerowin++;
17854 tp->t_flags |= TF_RXWIN0SENT;
17856 tp->t_flags &= ~TF_RXWIN0SENT;
17857 tp->snd_up = tp->snd_una; /* drag it along, it's deprecated */
17858 /* Now, are we using the fsb? If so, copy the template data to the mbuf */
17859 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
17862 cpto = mtod(m, uint8_t *);
17863 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
17865 * We have just copied in:
17866 * the IP/IP6 header
17867 * <optional udphdr>
17868 * tcphdr (no options)
17870 * We need to grab the correct pointers into the mbuf
17871 * for both the tcp header, and possibly the udp header (if tunneling).
17872 * We do this by using the offset in the copy buffer and adding it
17873 * to the mbuf base pointer (cpto).
17877 ip6 = mtod(m, struct ip6_hdr *);
17880 ip = mtod(m, struct ip *);
17881 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
17882 /* If we have a udp header let's set it into the mbuf as well */
17884 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr));
17886 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
17887 if (to.to_flags & TOF_SIGNATURE) {
17889 * Calculate MD5 signature and put it into the place
17890 * determined before.
17891 * NOTE: since TCP options buffer doesn't point into
17892 * mbuf's data, calculate offset and use it.
17894 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
17895 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
17897 * Do not send segment if the calculation of MD5
17898 * digest has failed.
17905 bcopy(opt, th + 1, optlen);
17906 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
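/*
 * For example, a timestamps-only segment has optlen = 12, giving
 * th_off = (20 + 12) >> 2 = 8 32-bit words of TCP header.
 */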
17909 * Put TCP length in the extended header, and then checksum the extended header and data.
17912 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
17916 * ip6_plen need not be filled now; it will be filled
17920 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
17921 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
17922 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
17923 th->th_sum = htons(0);
17924 UDPSTAT_INC(udps_opackets);
17926 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
17927 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
17928 th->th_sum = in6_cksum_pseudo(ip6,
17929 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
17934 #if defined(INET6) && defined(INET)
17940 m->m_pkthdr.csum_flags = CSUM_UDP;
17941 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
17942 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
17943 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
17944 th->th_sum = htons(0);
17945 UDPSTAT_INC(udps_opackets);
17947 m->m_pkthdr.csum_flags = CSUM_TCP;
17948 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
17949 th->th_sum = in_pseudo(ip->ip_src.s_addr,
17950 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
17951 IPPROTO_TCP + len + optlen));
17953 /* IP version must be set here for ipv4/ipv6 checking later */
17954 KASSERT(ip->ip_v == IPVERSION,
17955 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
17959 * Enable TSO and specify the size of the segments. The TCP pseudo
17960 * header checksum is always provided. XXX: Fixme: This is currently
17961 * not the case for IPv6.
17964 KASSERT(len > tp->t_maxseg - optlen,
17965 ("%s: len <= tso_segsz", __func__));
17966 m->m_pkthdr.csum_flags |= CSUM_TSO;
17967 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
17969 KASSERT(len + hdrlen == m_length(m, NULL),
17970 ("%s: mbuf chain different than expected: %d + %u != %u",
17971 __func__, len, hdrlen, m_length(m, NULL)));
17974 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
17975 hhook_run_tcp_est_out(tp, th, &to, len, tso);
17977 /* We're getting ready to send; log now. */
17978 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
17979 union tcp_log_stackspecific log;
17981 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
17982 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
17983 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
17984 if (rack->rack_no_prr)
17985 log.u_bbr.flex1 = 0;
17987 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
17988 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
17989 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
17990 log.u_bbr.flex4 = orig_len;
17992 log.u_bbr.flex5 = 0x80000000;
17994 log.u_bbr.flex5 = 0;
17995 /* Save off the early/late values */
17996 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
17997 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
17998 log.u_bbr.bw_inuse = rack_get_bw(rack);
17999 if (rsm || sack_rxmit) {
18001 log.u_bbr.flex8 = 2;
18003 log.u_bbr.flex8 = 1;
18005 log.u_bbr.flex8 = 0;
18007 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
18008 log.u_bbr.flex7 = mark;
18009 log.u_bbr.flex7 <<= 8;
18010 log.u_bbr.flex7 |= pass;
18011 log.u_bbr.pkts_out = tp->t_maxseg;
18012 log.u_bbr.timeStamp = cts;
18013 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18014 log.u_bbr.lt_epoch = cwnd_to_use;
18015 log.u_bbr.delivered = sendalot;
18016 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
18017 len, &log, false, NULL, NULL, 0, &tv);
18022 * Fill in IP length and desired time to live and send to IP level.
18023 * There should be a better way to handle ttl and tos; we could keep
18024 * them in the template, but need a way to checksum without them.
18027 * m->m_pkthdr.len should have been set before the cksum calculation,
18028 * because in6_cksum() needs it.
18033 * we separately set hoplimit for every segment, since the
18034 * user might want to change the value via setsockopt. Also,
18035 * desired default hop limit might be changed via Neighbor Discovery.
18038 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL);
18041 * Set the packet size here for the benefit of DTrace
18042 * probes. ip6_output() will set it properly; it's supposed
18043 * to include the option header lengths as well.
18045 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
18047 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
18048 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
18050 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
18052 if (tp->t_state == TCPS_SYN_SENT)
18053 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);
18055 TCP_PROBE5(send, NULL, tp, ip6, tp, th);
18056 /* TODO: IPv6 IP6TOS_ECT bit on */
18057 error = ip6_output(m,
18058 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
18059 inp->in6p_outputopts,
18064 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0),
18067 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
18068 mtu = inp->inp_route6.ro_nh->nh_mtu;
18071 #if defined(INET) && defined(INET6)
18076 ip->ip_len = htons(m->m_pkthdr.len);
18078 if (inp->inp_vflag & INP_IPV6PROTO)
18079 ip->ip_ttl = in6_selecthlim(inp, NULL);
18081 rack->r_ctl.fsb.hoplimit = ip->ip_ttl;
18083 * If we do path MTU discovery, then we set DF on every
18084 * packet. This might not be the best thing to do according
18085 * to RFC3390 Section 2. However the tcp hostcache mitigates
18086 * the problem so it affects only the first tcp connection with a host.
18089 * NB: Don't set DF on small MTU/MSS to have a safe fallback.
18092 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
18093 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
18094 if (tp->t_port == 0 || len < V_tcp_minmss) {
18095 ip->ip_off |= htons(IP_DF);
18098 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
18101 if (tp->t_state == TCPS_SYN_SENT)
18102 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);
18104 TCP_PROBE5(send, NULL, tp, ip, tp, th);
18106 error = ip_output(m,
18107 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
18113 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0,
18115 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
18116 mtu = inp->inp_route.ro_nh->nh_mtu;
18122 lgb->tlb_errno = error;
18126 * In transmit state, time the transmission and arrange for the
18127 * retransmit. In persist state, just set snd_max.
18130 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls);
18131 rack->forced_ack = 0; /* If we send something zap the FA flag */
18132 if (rsm && (doing_tlp == 0)) {
18133 /* Set we retransmitted */
18134 rack->rc_gp_saw_rec = 1;
18136 if (cwnd_to_use > tp->snd_ssthresh) {
18137 /* Set we sent in CA */
18138 rack->rc_gp_saw_ca = 1;
18140 /* Set we sent in SS */
18141 rack->rc_gp_saw_ss = 1;
18144 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
18145 (tp->t_flags & TF_SACK_PERMIT) &&
18146 tp->rcv_numsacks > 0)
18147 tcp_clean_dsack_blocks(tp);
18148 tot_len_this_send += len;
18150 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1);
18151 else if (len == 1) {
18152 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1);
18153 } else if (len > 1) {
18156 idx = (len / segsiz) + 3;
18157 if (idx >= TCP_MSS_ACCT_ATIMER)
18158 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
18160 counter_u64_add(rack_out_size[idx], 1);
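/*
 * Illustratively, a send of 4 * segsiz lands in bucket
 * idx = 4 + 3 = 7, and anything indexing at or beyond
 * TCP_MSS_ACCT_ATIMER collapses into the last bucket.
 */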
18163 if ((rack->rack_no_prr == 0) &&
18166 if (rack->r_ctl.rc_prr_sndcnt >= len)
18167 rack->r_ctl.rc_prr_sndcnt -= len;
18169 rack->r_ctl.rc_prr_sndcnt = 0;
18172 if (doing_tlp && (rsm == NULL)) {
18173 /* New send doing a TLP */
18174 add_flag |= RACK_TLP;
18176 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error,
18177 rack_to_usec_ts(&tv),
18178 rsm, add_flag, s_mb, s_moff, hw_tls);
18181 if ((error == 0) &&
18183 (tp->snd_una == tp->snd_max))
18184 rack->r_ctl.rc_tlp_rxt_last_time = cts;
18186 tcp_seq startseq = tp->snd_nxt;
18188 /* Track our lost count */
18189 if (rsm && (doing_tlp == 0))
18190 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start;
18192 * Advance snd_nxt over sequence space of this segment.
18195 /* We don't log or do anything with errors */
18197 if (doing_tlp == 0) {
18200 * Not a retransmission of some
18201 * sort, new data is going out so
18202 * clear our TLP count and flag.
18204 rack->rc_tlp_in_progress = 0;
18205 rack->r_ctl.rc_tlp_cnt_out = 0;
18209 * We have just sent a TLP, mark that it is true
18210 * and make sure our in progress is set so we
18211 * continue to check the count.
18213 rack->rc_tlp_in_progress = 1;
18214 rack->r_ctl.rc_tlp_cnt_out++;
18216 if (flags & (TH_SYN | TH_FIN)) {
18217 if (flags & TH_SYN)
18219 if (flags & TH_FIN) {
18221 tp->t_flags |= TF_SENTFIN;
18224 /* In the ENOBUFS case we do *not* update snd_max */
18228 tp->snd_nxt += len;
18229 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
18230 if (tp->snd_una == tp->snd_max) {
18232 * Update the time we just added data since
18233 * none was outstanding.
18235 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
18236 tp->t_acktime = ticks;
18238 tp->snd_max = tp->snd_nxt;
18240 * Time this transmission if not a retransmission and
18241 * not currently timing anything.
18242 * This is only relevant in case of switching back to the base stack.
18245 if (tp->t_rtttime == 0) {
18246 tp->t_rtttime = ticks;
18247 tp->t_rtseq = startseq;
18248 KMOD_TCPSTAT_INC(tcps_segstimed);
18251 ((tp->t_flags & TF_GPUTINPROG) == 0))
18252 rack_start_gp_measurement(tp, rack, startseq, sb_offset);
18255 * If we are doing FO we need to update the mbuf position and subtract
18256 * what we just sent. The sack_rxmit case happens when the peer sends
18257 * us duplicate information and we thus want to send a DSACK.
18259 * XXXRRS: This brings to mind a question: when we send a DSACK block, is TSO
18260 * turned off? If not then we are going to echo multiple DSACK blocks
18261 * out (with the TSO), which we should not be doing.
18263 if (rack->r_fast_output && len) {
18264 if (rack->r_ctl.fsb.left_to_send > len)
18265 rack->r_ctl.fsb.left_to_send -= len;
18267 rack->r_ctl.fsb.left_to_send = 0;
18268 if (rack->r_ctl.fsb.left_to_send < segsiz)
18269 rack->r_fast_output = 0;
18270 if (rack->r_fast_output) {
18271 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
18272 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
18278 rack->r_ctl.rc_agg_delayed = 0;
18281 rack->r_ctl.rc_agg_early = 0;
18282 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */
18284 * Failures do not advance the seq counter above. For the
18285 * case of ENOBUFS we will fall out and retry in 1ms with
18286 * the hpts. Everything else will just have to retransmit with the timer.
18289 * In any case, we do not want to loop around for another
18290 * send without a good reason.
18295 tp->t_softerror = error;
18296 #ifdef TCP_ACCOUNTING
18297 crtsc = get_cyclecount();
18298 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18299 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
18301 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1);
18302 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18303 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
18305 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val));
18311 * Pace us right away to retry in a short amount of time.
18314 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
18315 if (rack->rc_enobuf < 0x7f)
18317 if (slot < (10 * HPTS_USEC_IN_MSEC))
18318 slot = 10 * HPTS_USEC_IN_MSEC;
18319 if (rack->r_ctl.crte != NULL) {
18320 counter_u64_add(rack_saw_enobuf_hw, 1);
18321 tcp_rl_log_enobuf(rack->r_ctl.crte);
18323 counter_u64_add(rack_saw_enobuf, 1);
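/*
 * Net effect of the backoff above (sketch): slot is
 * (1 + rc_enobuf) milliseconds with a 10ms floor, so early ENOBUFS
 * retries all wait 10ms and only a persistently full driver queue
 * stretches the delay further; rc_enobuf saturates at 0x7f.
 */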
18327 * For some reason the interface we used initially
18328 * to send segments changed to another or lowered
18329 * its MTU. If TSO was active we either got an
18330 * interface without TSO capabilities or TSO was
18331 * turned off. If we obtained mtu from ip_output()
18332 * then update it and try again.
18335 tp->t_flags &= ~TF_TSO;
18337 tcp_mss_update(tp, -1, mtu, NULL, NULL);
18340 slot = 10 * HPTS_USEC_IN_MSEC;
18341 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
18342 #ifdef TCP_ACCOUNTING
18343 crtsc = get_cyclecount();
18344 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18345 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
18347 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1);
18348 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18349 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
18351 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val));
18356 counter_u64_add(rack_saw_enetunreach, 1);
18360 if (TCPS_HAVERCVDSYN(tp->t_state)) {
18361 tp->t_softerror = error;
18365 slot = 10 * HPTS_USEC_IN_MSEC;
18366 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
18367 #ifdef TCP_ACCOUNTING
18368 crtsc = get_cyclecount();
18369 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18370 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
18372 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1);
18373 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18374 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
18376 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val));
18382 rack->rc_enobuf = 0;
18383 if (IN_FASTRECOVERY(tp->t_flags) && rsm)
18384 rack->r_ctl.retran_during_recovery += len;
18386 KMOD_TCPSTAT_INC(tcps_sndtotal);
18389 * Data sent (as far as we can tell). If this advertises a larger
18390 * window than any other segment, then remember the size of the
18391 * advertised window. Any pending ACK has now been sent.
18393 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
18394 tp->rcv_adv = tp->rcv_nxt + recwin;
18396 tp->last_ack_sent = tp->rcv_nxt;
18397 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
18400 /* Do we need to turn off sendalot? */
18401 if (rack->r_ctl.rc_pace_max_segs &&
18402 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) {
18403 /* We hit our max. */
18405 } else if ((rack->rc_user_set_max_segs) &&
18406 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) {
18407 /* We hit the user defined max */
18411 if ((error == 0) && (flags & TH_FIN))
18412 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN);
18413 if (flags & TH_RST) {
18415 * We don't send again after sending a RST.
18420 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
18421 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) {
18423 * Get our pacing rate; if an error
18424 * occurred in sending (ENOBUF) we would
18425 * hit the else if with slot preset. Other errors return.
18428 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz);
18431 (rsm->r_flags & RACK_HAS_SYN) == 0 &&
18432 rack->use_rack_rr) {
18433 /* It's a retransmit and we use the rack cheat? */
18435 (rack->rc_always_pace == 0) ||
18436 (rack->r_rr_config == 1)) {
18438 * We have no pacing set or we
18439 * are using old-style rack or
18440 * we are overridden to use the old 1ms pacing.
18442 slot = rack->r_ctl.rc_min_to;
18445 /* We have sent, so clear the flag */
18446 rack->r_ent_rec_ns = 0;
18447 if (rack->r_must_retran) {
18449 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
18450 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
18452 * We have retransmitted all.
18454 rack->r_must_retran = 0;
18455 rack->r_ctl.rc_out_at_rto = 0;
18457 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
18459 * Sending new data will also kill the must-retransmit state.
18462 rack->r_must_retran = 0;
18463 rack->r_ctl.rc_out_at_rto = 0;
18466 rack->r_ctl.fsb.recwin = recwin;
18467 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) &&
18468 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
18470 * We hit an RTO and have now passed the snd_max we had at the RTO;
18471 * clear all the WAS flags.
18473 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY);
18476 /* set the rack tcb into the slot N */
18477 counter_u64_add(rack_paced_segments, 1);
18478 if ((error == 0) &&
18480 ((flags & (TH_SYN|TH_FIN)) == 0) &&
18482 (tp->snd_nxt == tp->snd_max) &&
18484 (tp->rcv_numsacks == 0) &&
18485 rack->r_fsb_inited &&
18486 TCPS_HAVEESTABLISHED(tp->t_state) &&
18487 (rack->r_must_retran == 0) &&
18488 ((tp->t_flags & TF_NEEDFIN) == 0) &&
18489 (len > 0) && (orig_len > 0) &&
18490 (orig_len > len) &&
18491 ((orig_len - len) >= segsiz) &&
18493 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
18494 /* We can send at least one more MSS using our fsb */
18496 rack->r_fast_output = 1;
18497 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
18498 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
18499 rack->r_ctl.fsb.tcp_flags = flags;
18500 rack->r_ctl.fsb.left_to_send = orig_len - len;
18502 rack->r_ctl.fsb.hw_tls = 1;
18504 rack->r_ctl.fsb.hw_tls = 0;
18505 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
18506 ("rack:%p left_to_send:%u sbavail:%u out:%u",
18507 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
18508 (tp->snd_max - tp->snd_una)));
18509 if (rack->r_ctl.fsb.left_to_send < segsiz)
18510 rack->r_fast_output = 0;
18512 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
18513 rack->r_ctl.fsb.rfo_apply_push = 1;
18515 rack->r_ctl.fsb.rfo_apply_push = 0;
18518 rack->r_fast_output = 0;
18519 rack_log_fsb(rack, tp, so, flags,
18520 ipoptlen, orig_len, len, error,
18521 (rsm == NULL), optlen, __LINE__, 2);
18522 } else if (sendalot) {
18526 counter_u64_add(rack_unpaced_segments, 1);
18528 if ((error == 0) &&
18530 ((flags & (TH_SYN|TH_FIN)) == 0) &&
18533 (tp->rcv_numsacks == 0) &&
18534 (tp->snd_nxt == tp->snd_max) &&
18535 (rack->r_must_retran == 0) &&
18536 rack->r_fsb_inited &&
18537 TCPS_HAVEESTABLISHED(tp->t_state) &&
18538 ((tp->t_flags & TF_NEEDFIN) == 0) &&
18539 (len > 0) && (orig_len > 0) &&
18540 (orig_len > len) &&
18541 ((orig_len - len) >= segsiz) &&
18543 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
18544 /* we can use fast_output for more */
18546 rack->r_fast_output = 1;
18547 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
18548 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
18549 rack->r_ctl.fsb.tcp_flags = flags;
18550 rack->r_ctl.fsb.left_to_send = orig_len - len;
18552 rack->r_ctl.fsb.hw_tls = 1;
18554 rack->r_ctl.fsb.hw_tls = 0;
18555 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
18556 ("rack:%p left_to_send:%u sbavail:%u out:%u",
18557 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
18558 (tp->snd_max - tp->snd_una)));
18559 if (rack->r_ctl.fsb.left_to_send < segsiz) {
18560 rack->r_fast_output = 0;
18562 if (rack->r_fast_output) {
18563 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
18564 rack->r_ctl.fsb.rfo_apply_push = 1;
18566 rack->r_ctl.fsb.rfo_apply_push = 0;
18567 rack_log_fsb(rack, tp, so, flags,
18568 ipoptlen, orig_len, len, error,
18569 (rsm == NULL), optlen, __LINE__, 3);
18571 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
18581 counter_u64_add(rack_unpaced_segments, 1);
18583 /* Assure that when we leave, snd_nxt points to the top */
18584 if (SEQ_GT(tp->snd_max, tp->snd_nxt))
18585 tp->snd_nxt = tp->snd_max;
18586 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0);
18587 #ifdef TCP_ACCOUNTING
18588 crtsc = get_cyclecount() - ts_val;
18589 if (tot_len_this_send) {
18590 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18591 tp->tcp_cnt_counters[SND_OUT_DATA]++;
18593 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1);
18594 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18595 tp->tcp_proc_time[SND_OUT_DATA] += crtsc;
18597 counter_u64_add(tcp_proc_time[SND_OUT_DATA], crtsc);
18598 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18599 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz);
18601 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) /segsiz));
18603 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18604 tp->tcp_cnt_counters[SND_OUT_ACK]++;
18606 counter_u64_add(tcp_cnt_counters[SND_OUT_ACK], 1);
18607 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18608 tp->tcp_proc_time[SND_OUT_ACK] += crtsc;
18610 counter_u64_add(tcp_proc_time[SND_OUT_ACK], crtsc);
18614 if (error == ENOBUFS)
18620 rack_update_seg(struct tcp_rack *rack)
18624 orig_val = rack->r_ctl.rc_pace_max_segs;
18625 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
18626 if (orig_val != rack->r_ctl.rc_pace_max_segs)
18627 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL);
18631 rack_mtu_change(struct tcpcb *tp)
18634 * The MSS may have changed
18636 struct tcp_rack *rack;
18638 rack = (struct tcp_rack *)tp->t_fb_ptr;
18639 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) {
18641 * The MTU has changed; we need to resend everything
18642 * since all we have sent is lost. We first fix
18643 * up the mtu though.
18645 rack_set_pace_segments(tp, rack, __LINE__, NULL);
18646 /* We treat this like a full retransmit timeout without the cwnd adjustment */
18647 rack_remxt_tmr(tp);
18648 rack->r_fast_output = 0;
18649 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp,
18650 rack->r_ctl.rc_sacked);
18651 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
18652 rack->r_must_retran = 1;
18655 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
18656 /* We don't use snd_nxt to retransmit */
18657 tp->snd_nxt = tp->snd_max;
18661 rack_set_profile(struct tcp_rack *rack, int prof)
18665 /* pace_always=1 */
18666 if (rack->rc_always_pace == 0) {
18667 if (tcp_can_enable_pacing() == 0)
18670 rack->rc_always_pace = 1;
18671 if (rack->use_fixed_rate || rack->gp_ready)
18672 rack_set_cc_pacing(rack);
18673 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
18674 rack->rack_attempt_hdwr_pace = 0;
18676 if (rack_use_cmp_acks)
18677 rack->r_use_cmp_ack = 1;
18678 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) &&
18679 rack->r_use_cmp_ack)
18680 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
18682 rack->rack_enable_scwnd = 1;
18684 rack->rc_gp_dyn_mul = 1;
18686 rack->r_ctl.rack_per_of_gp_ca = 100;
18688 rack->r_rr_config = 3;
18690 rack->r_ctl.rc_no_push_at_mrtt = 2;
18692 rack->rc_pace_to_cwnd = 1;
18693 rack->rc_pace_fill_if_rttin_range = 0;
18694 rack->rtt_limit_mul = 0;
18696 rack->rack_no_prr = 1;
18698 rack->r_limit_scw = 1;
18700 rack->r_ctl.rack_per_of_gp_rec = 90;
18703 } else if (prof == 3) {
18704 /* Same as profile one except fill_cw becomes 2 (a less aggressive set) */
18705 /* pace_always=1 */
18706 if (rack->rc_always_pace == 0) {
18707 if (tcp_can_enable_pacing() == 0)
18710 rack->rc_always_pace = 1;
18711 if (rack->use_fixed_rate || rack->gp_ready)
18712 rack_set_cc_pacing(rack);
18713 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
18714 rack->rack_attempt_hdwr_pace = 0;
18716 if (rack_use_cmp_acks)
18717 rack->r_use_cmp_ack = 1;
18718 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) &&
18719 rack->r_use_cmp_ack)
18720 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
18722 rack->rack_enable_scwnd = 1;
18724 rack->rc_gp_dyn_mul = 1;
18726 rack->r_ctl.rack_per_of_gp_ca = 100;
18728 rack->r_rr_config = 3;
18730 rack->r_ctl.rc_no_push_at_mrtt = 2;
18732 rack->rc_pace_to_cwnd = 1;
18733 rack->r_fill_less_agg = 1;
18734 rack->rc_pace_fill_if_rttin_range = 0;
18735 rack->rtt_limit_mul = 0;
18737 rack->rack_no_prr = 1;
18739 rack->r_limit_scw = 1;
18741 rack->r_ctl.rack_per_of_gp_rec = 90;
18745 } else if (prof == 2) {
18747 if (rack->rc_always_pace == 0) {
18748 if (tcp_can_enable_pacing() == 0)
18751 rack->rc_always_pace = 1;
18752 if (rack->use_fixed_rate || rack->gp_ready)
18753 rack_set_cc_pacing(rack);
18754 rack->r_use_cmp_ack = 1;
18755 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state))
18756 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
18757 /* pace_always=1 */
18758 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
18760 rack->rack_enable_scwnd = 1;
18762 rack->rc_gp_dyn_mul = 1;
18763 rack->r_ctl.rack_per_of_gp_ca = 100;
18765 rack->r_rr_config = 3;
18767 rack->r_ctl.rc_no_push_at_mrtt = 2;
18769 rack->rc_pace_to_cwnd = 1;
18770 rack->rc_pace_fill_if_rttin_range = 0;
18771 rack->rtt_limit_mul = 0;
18773 rack->rack_no_prr = 1;
18775 rack->r_limit_scw = 0;
18777 } else if (prof == 0) {
18778 /* This changes things back to the default settings */
18780 if (rack->rc_always_pace) {
18781 tcp_decrement_paced_conn();
18782 rack_undo_cc_pacing(rack);
18783 rack->rc_always_pace = 0;
18785 if (rack_pace_every_seg && tcp_can_enable_pacing()) {
18786 rack->rc_always_pace = 1;
18787 if (rack->use_fixed_rate || rack->gp_ready)
18788 rack_set_cc_pacing(rack);
18790 rack->rc_always_pace = 0;
18791 if (rack_use_cmp_acks)
18792 rack->r_use_cmp_ack = 1;
18794 rack->r_use_cmp_ack = 0;
18795 if (rack_disable_prr)
18796 rack->rack_no_prr = 1;
18798 rack->rack_no_prr = 0;
18799 if (rack_gp_no_rec_chg)
18800 rack->rc_gp_no_rec_chg = 1;
18802 rack->rc_gp_no_rec_chg = 0;
18803 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) {
18804 rack->r_mbuf_queue = 1;
18805 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state))
18806 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
18807 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
18809 rack->r_mbuf_queue = 0;
18810 rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
18812 if (rack_enable_shared_cwnd)
18813 rack->rack_enable_scwnd = 1;
18815 rack->rack_enable_scwnd = 0;
18816 if (rack_do_dyn_mul) {
18817 /* When dynamic adjustment is on CA needs to start at 100% */
18818 rack->rc_gp_dyn_mul = 1;
18819 if (rack_do_dyn_mul >= 100)
18820 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
18822 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
18823 rack->rc_gp_dyn_mul = 0;
18825 rack->r_rr_config = 0;
18826 rack->r_ctl.rc_no_push_at_mrtt = 0;
18827 rack->rc_pace_to_cwnd = 0;
18828 rack->rc_pace_fill_if_rttin_range = 0;
18829 rack->rtt_limit_mul = 0;
18831 if (rack_enable_hw_pacing)
18832 rack->rack_hdw_pace_ena = 1;
18834 rack->rack_hdw_pace_ena = 0;
18835 if (rack_disable_prr)
18836 rack->rack_no_prr = 1;
18838 rack->rack_no_prr = 0;
18839 if (rack_limits_scwnd)
18840 rack->r_limit_scw = 1;
18842 rack->r_limit_scw = 0;
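/*
 * Rough map of the profiles handled above: 1 and 3 differ only in
 * how aggressively fill_cw paces, 2 is the fill-cw variant without
 * the shared-cwnd limit, and 0 rebuilds every knob from the global
 * sysctl defaults.
 */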
18849 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval)
18851 struct deferred_opt_list *dol;
18853 dol = malloc(sizeof(struct deferred_opt_list),
18854 M_TCPFSB, M_NOWAIT|M_ZERO);
18857 * No space, yikes -- fail out.
18861 dol->optname = sopt_name;
18862 dol->optval = loptval;
18863 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next);
18868 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name,
18869 uint32_t optval, uint64_t loptval)
18871 struct epoch_tracker et;
18872 struct sockopt sopt;
18873 struct cc_newreno_opts opt;
18878 switch (sopt_name) {
18880 case TCP_RACK_PACING_BETA:
18881 RACK_OPTS_INC(tcp_rack_beta);
18882 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
18883 /* This only works for newreno. */
18887 if (rack->rc_pacing_cc_set) {
18889 * Set them into the real CC module
18890 * what's in the rack pcb are the old values
18891 * to be used on restoral.
18893 sopt.sopt_dir = SOPT_SET;
18894 opt.name = CC_NEWRENO_BETA;
18896 if (CC_ALGO(tp)->ctl_output != NULL)
18897 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
18904 * Not pacing yet so set it into our local
18905 * rack pcb storage.
18907 rack->r_ctl.rc_saved_beta.beta = optval;
18910 case TCP_RACK_TIMER_SLOP:
18911 RACK_OPTS_INC(tcp_rack_timer_slop);
18912 rack->r_ctl.timer_slop = optval;
18913 if (rack->rc_tp->t_srtt) {
18915 * If we have an SRTT let's update t_rxtcur
18916 * to have the new slop.
18918 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
18919 rack_rto_min, rack_rto_max,
18920 rack->r_ctl.timer_slop);
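/*
 * E.g. (hypothetical values): if RACK_REXMTVAL(tp) works out to a
 * 240ms RTO, the RANGESET above immediately re-clamps t_rxtcur to
 * [rack_rto_min, rack_rto_max] with the new slop folded in, so a
 * slop change takes effect without waiting for a new RTT sample.
 */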
18923 case TCP_RACK_PACING_BETA_ECN:
18924 RACK_OPTS_INC(tcp_rack_beta_ecn);
18925 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
18926 /* This only works for newreno. */
18930 if (rack->rc_pacing_cc_set) {
18932 * Set them into the real CC module
18933 * what's in the rack pcb are the old values
18934 * to be used on restoral.
18936 sopt.sopt_dir = SOPT_SET;
18937 opt.name = CC_NEWRENO_BETA_ECN;
18939 if (CC_ALGO(tp)->ctl_output != NULL)
18940 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
18945 * Not pacing yet so set it into our local
18946 * rack pcb storage.
18948 rack->r_ctl.rc_saved_beta.beta_ecn = optval;
18949 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN;
18952 case TCP_DEFER_OPTIONS:
18953 RACK_OPTS_INC(tcp_defer_opt);
18955 if (rack->gp_ready) {
18960 rack->defer_options = 1;
18962 rack->defer_options = 0;
18964 case TCP_RACK_MEASURE_CNT:
18965 RACK_OPTS_INC(tcp_rack_measure_cnt);
18966 if (optval && (optval <= 0xff)) {
18967 rack->r_ctl.req_measurements = optval;
18971 case TCP_REC_ABC_VAL:
18972 RACK_OPTS_INC(tcp_rec_abc_val);
18974 rack->r_use_labc_for_rec = 1;
18976 rack->r_use_labc_for_rec = 0;
18978 case TCP_RACK_ABC_VAL:
18979 RACK_OPTS_INC(tcp_rack_abc_val);
18980 if ((optval > 0) && (optval < 255))
18981 rack->rc_labc = optval;
18985 case TCP_HDWR_UP_ONLY:
18986 RACK_OPTS_INC(tcp_pacing_up_only);
18988 rack->r_up_only = 1;
18990 rack->r_up_only = 0;
18992 case TCP_PACING_RATE_CAP:
18993 RACK_OPTS_INC(tcp_pacing_rate_cap);
18994 rack->r_ctl.bw_rate_cap = loptval;
18996 case TCP_RACK_PROFILE:
18997 RACK_OPTS_INC(tcp_profile);
18998 error = rack_set_profile(rack, optval);
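/*
 * For reference, these per-connection knobs are plain TCP-level
 * socket options; a userland sketch (names as defined in
 * netinet/tcp.h) might look like:
 *
 *	int prof = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_RACK_PROFILE,
 *	    &prof, sizeof(prof));
 *
 * assuming the connection is already using the rack stack.
 */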
19000 case TCP_USE_CMP_ACKS:
19001 RACK_OPTS_INC(tcp_use_cmp_acks);
19002 if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) {
19003 /* You can't turn it off once it's on! */
19005 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) {
19006 rack->r_use_cmp_ack = 1;
19007 rack->r_mbuf_queue = 1;
19008 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19010 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
19011 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
19013 case TCP_SHARED_CWND_TIME_LIMIT:
19014 RACK_OPTS_INC(tcp_lscwnd);
19016 rack->r_limit_scw = 1;
19018 rack->r_limit_scw = 0;
19020 case TCP_RACK_PACE_TO_FILL:
19021 RACK_OPTS_INC(tcp_fillcw);
19023 rack->rc_pace_to_cwnd = 0;
19025 rack->rc_pace_to_cwnd = 1;
19027 rack->r_fill_less_agg = 1;
19029 if ((optval >= rack_gp_rtt_maxmul) &&
19030 rack_gp_rtt_maxmul &&
19032 rack->rc_pace_fill_if_rttin_range = 1;
19033 rack->rtt_limit_mul = optval;
19035 rack->rc_pace_fill_if_rttin_range = 0;
19036 rack->rtt_limit_mul = 0;
19039 case TCP_RACK_NO_PUSH_AT_MAX:
19040 RACK_OPTS_INC(tcp_npush);
19042 rack->r_ctl.rc_no_push_at_mrtt = 0;
19043 else if (optval < 0xff)
19044 rack->r_ctl.rc_no_push_at_mrtt = optval;
19048 case TCP_SHARED_CWND_ENABLE:
19049 RACK_OPTS_INC(tcp_rack_scwnd);
19051 rack->rack_enable_scwnd = 0;
19053 rack->rack_enable_scwnd = 1;
19055 case TCP_RACK_MBUF_QUEUE:
19056 /* Now do we use the LRO mbuf-queue feature */
19057 RACK_OPTS_INC(tcp_rack_mbufq);
19058 if (optval || rack->r_use_cmp_ack)
19059 rack->r_mbuf_queue = 1;
19061 rack->r_mbuf_queue = 0;
19062 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
19063 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19065 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
19067 case TCP_RACK_NONRXT_CFG_RATE:
19068 RACK_OPTS_INC(tcp_rack_cfg_rate);
19070 rack->rack_rec_nonrxt_use_cr = 0;
19072 rack->rack_rec_nonrxt_use_cr = 1;
19075 RACK_OPTS_INC(tcp_rack_noprr);
19077 rack->rack_no_prr = 0;
19078 else if (optval == 1)
19079 rack->rack_no_prr = 1;
19080 else if (optval == 2)
19081 rack->no_prr_addback = 1;
19085 case TCP_TIMELY_DYN_ADJ:
19086 RACK_OPTS_INC(tcp_timely_dyn);
19088 rack->rc_gp_dyn_mul = 0;
19090 rack->rc_gp_dyn_mul = 1;
19091 if (optval >= 100) {
				 * If the user sets something 100 or more,
				 * it's the gp_ca value.
19096 rack->r_ctl.rack_per_of_gp_ca = optval;
19100 case TCP_RACK_DO_DETECTION:
19101 RACK_OPTS_INC(tcp_rack_do_detection);
19103 rack->do_detection = 0;
19105 rack->do_detection = 1;
19107 case TCP_RACK_TLP_USE:
19108 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) {
19112 RACK_OPTS_INC(tcp_tlp_use);
19113 rack->rack_tlp_threshold_use = optval;
19115 case TCP_RACK_TLP_REDUCE:
19116 /* RACK TLP cwnd reduction (bool) */
19117 RACK_OPTS_INC(tcp_rack_tlp_reduce);
19118 rack->r_ctl.rc_tlp_cwnd_reduce = optval;
19120 /* Pacing related ones */
19121 case TCP_RACK_PACE_ALWAYS:
			 * Zero is the old rack method; 1 is the new
			 * method using a pacing rate.
19126 RACK_OPTS_INC(tcp_rack_pace_always);
19128 if (rack->rc_always_pace) {
19131 } else if (tcp_can_enable_pacing()) {
19132 rack->rc_always_pace = 1;
19133 if (rack->use_fixed_rate || rack->gp_ready)
19134 rack_set_cc_pacing(rack);
19141 if (rack->rc_always_pace) {
19142 tcp_decrement_paced_conn();
19143 rack->rc_always_pace = 0;
19144 rack_undo_cc_pacing(rack);
19147 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
19148 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19150 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
		/* A rate may already be set (irate or otherwise); if so, update the pacing segment size. */
19152 rack_update_seg(rack);
19154 case TCP_BBR_RACK_INIT_RATE:
19155 RACK_OPTS_INC(tcp_initial_rate);
		/* Change from kbits per second to bytes per second */
		val = (uint64_t)optval * 1000 / 8;
		rack->r_ctl.init_rate = val;
19161 if (rack->rc_init_win != rack_default_init_window) {
			 * Options don't always get applied in the
			 * order you think, so to be sure we update
			 * the cwnd we need to check whether we are
			 * still at a point where raising it is valid.
19171 win = rc_init_window(rack);
19172 if (SEQ_GT(tp->snd_max, tp->iss))
19173 snt = tp->snd_max - tp->iss;
19177 (tp->snd_cwnd < win))
19178 tp->snd_cwnd = win;
19180 if (rack->rc_always_pace)
19181 rack_update_seg(rack);
19183 case TCP_BBR_IWINTSO:
19184 RACK_OPTS_INC(tcp_initial_win);
19185 if (optval && (optval <= 0xff)) {
19188 rack->rc_init_win = optval;
19189 win = rc_init_window(rack);
19190 if (SEQ_GT(tp->snd_max, tp->iss))
19191 snt = tp->snd_max - tp->iss;
19196 #ifdef NETFLIX_PEAKRATE
		    tp->t_maxpeakrate ||
19199 rack->r_ctl.init_rate)) {
				 * We are not past the initial window and we
				 * have some basis for pacing, so we may need
				 * to adjust the cwnd up. Note that even if we
				 * don't set the cwnd, it's still ok to raise
				 * rc_init_win, which can be used coming out
				 * of idle when we would have a rate.
19209 if (tp->snd_cwnd < win)
19210 tp->snd_cwnd = win;
19212 if (rack->rc_always_pace)
19213 rack_update_seg(rack);
19217 case TCP_RACK_FORCE_MSEG:
19218 RACK_OPTS_INC(tcp_rack_force_max_seg);
19220 rack->rc_force_max_seg = 1;
19222 rack->rc_force_max_seg = 0;
19224 case TCP_RACK_PACE_MAX_SEG:
		/* Max segment size in a pace, in bytes */
19226 RACK_OPTS_INC(tcp_rack_max_seg);
19227 rack->rc_user_set_max_segs = optval;
19228 rack_set_pace_segments(tp, rack, __LINE__, NULL);
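	/*
	 * The three fixed pacing rates below (rec, ss, ca) can be set
	 * independently, but setting one seeds any of the others that are
	 * still zero, so a single option suffices to fix-rate the whole
	 * connection.
	 */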
19230 case TCP_RACK_PACE_RATE_REC:
		/* Set the fixed pacing rate in bytes per second for recovery */
19232 RACK_OPTS_INC(tcp_rack_pace_rate_rec);
19233 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
19234 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
19235 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
19236 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
19237 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
19238 rack->use_fixed_rate = 1;
19239 if (rack->rc_always_pace)
19240 rack_set_cc_pacing(rack);
19241 rack_log_pacing_delay_calc(rack,
19242 rack->r_ctl.rc_fixed_pacing_rate_ss,
19243 rack->r_ctl.rc_fixed_pacing_rate_ca,
19244 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
19248 case TCP_RACK_PACE_RATE_SS:
		/* Set the fixed pacing rate in bytes per second for slow-start */
19250 RACK_OPTS_INC(tcp_rack_pace_rate_ss);
19251 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
19252 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
19253 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
19254 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
19255 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
19256 rack->use_fixed_rate = 1;
19257 if (rack->rc_always_pace)
19258 rack_set_cc_pacing(rack);
19259 rack_log_pacing_delay_calc(rack,
19260 rack->r_ctl.rc_fixed_pacing_rate_ss,
19261 rack->r_ctl.rc_fixed_pacing_rate_ca,
19262 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
19266 case TCP_RACK_PACE_RATE_CA:
		/* Set the fixed pacing rate in bytes per second for congestion avoidance */
19268 RACK_OPTS_INC(tcp_rack_pace_rate_ca);
19269 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
19270 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
19271 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
19272 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
19273 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
19274 rack->use_fixed_rate = 1;
19275 if (rack->rc_always_pace)
19276 rack_set_cc_pacing(rack);
19277 rack_log_pacing_delay_calc(rack,
19278 rack->r_ctl.rc_fixed_pacing_rate_ss,
19279 rack->r_ctl.rc_fixed_pacing_rate_ca,
19280 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
19283 case TCP_RACK_GP_INCREASE_REC:
19284 RACK_OPTS_INC(tcp_gp_inc_rec);
19285 rack->r_ctl.rack_per_of_gp_rec = optval;
19286 rack_log_pacing_delay_calc(rack,
19287 rack->r_ctl.rack_per_of_gp_ss,
19288 rack->r_ctl.rack_per_of_gp_ca,
19289 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
19292 case TCP_RACK_GP_INCREASE_CA:
19293 RACK_OPTS_INC(tcp_gp_inc_ca);
			 * We don't allow any reduction below 100%.
19303 rack->r_ctl.rack_per_of_gp_ca = ca;
19304 rack_log_pacing_delay_calc(rack,
19305 rack->r_ctl.rack_per_of_gp_ss,
19306 rack->r_ctl.rack_per_of_gp_ca,
19307 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
19310 case TCP_RACK_GP_INCREASE_SS:
19311 RACK_OPTS_INC(tcp_gp_inc_ss);
			 * We don't allow any reduction below 100%.
19321 rack->r_ctl.rack_per_of_gp_ss = ss;
19322 rack_log_pacing_delay_calc(rack,
19323 rack->r_ctl.rack_per_of_gp_ss,
19324 rack->r_ctl.rack_per_of_gp_ca,
19325 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
19328 case TCP_RACK_RR_CONF:
19329 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate);
19330 if (optval && optval <= 3)
19331 rack->r_rr_config = optval;
19333 rack->r_rr_config = 0;
19335 case TCP_HDWR_RATE_CAP:
19336 RACK_OPTS_INC(tcp_hdwr_rate_cap);
19338 if (rack->r_rack_hw_rate_caps == 0)
19339 rack->r_rack_hw_rate_caps = 1;
19343 rack->r_rack_hw_rate_caps = 0;
19346 case TCP_BBR_HDWR_PACE:
19347 RACK_OPTS_INC(tcp_hdwr_pacing);
19349 if (rack->rack_hdrw_pacing == 0) {
19350 rack->rack_hdw_pace_ena = 1;
19351 rack->rack_attempt_hdwr_pace = 0;
19355 rack->rack_hdw_pace_ena = 0;
19357 if (rack->r_ctl.crte != NULL) {
19358 rack->rack_hdrw_pacing = 0;
19359 rack->rack_attempt_hdwr_pace = 0;
19360 tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
19361 rack->r_ctl.crte = NULL;
19366 /* End Pacing related ones */
19367 case TCP_RACK_PRR_SENDALOT:
19368 /* Allow PRR to send more than one seg */
19369 RACK_OPTS_INC(tcp_rack_prr_sendalot);
19370 rack->r_ctl.rc_prr_sendalot = optval;
19372 case TCP_RACK_MIN_TO:
19373 /* Minimum time between rack t-o's in ms */
19374 RACK_OPTS_INC(tcp_rack_min_to);
19375 rack->r_ctl.rc_min_to = optval;
19377 case TCP_RACK_EARLY_SEG:
19378 /* If early recovery max segments */
19379 RACK_OPTS_INC(tcp_rack_early_seg);
19380 rack->r_ctl.rc_early_recovery_segs = optval;
19382 case TCP_RACK_REORD_THRESH:
19383 /* RACK reorder threshold (shift amount) */
19384 RACK_OPTS_INC(tcp_rack_reord_thresh);
19385 if ((optval > 0) && (optval < 31))
19386 rack->r_ctl.rc_reorder_shift = optval;
19390 case TCP_RACK_REORD_FADE:
19391 /* Does reordering fade after ms time */
19392 RACK_OPTS_INC(tcp_rack_reord_fade);
19393 rack->r_ctl.rc_reorder_fade = optval;
19395 case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold, i.e. srtt + (srtt/N) */
19397 RACK_OPTS_INC(tcp_rack_tlp_thresh);
19399 rack->r_ctl.rc_tlp_threshold = optval;
19403 case TCP_BBR_USE_RACK_RR:
19404 RACK_OPTS_INC(tcp_rack_rr);
19406 rack->use_rack_rr = 1;
19408 rack->use_rack_rr = 0;
19410 case TCP_FAST_RSM_HACK:
19411 RACK_OPTS_INC(tcp_rack_fastrsm_hack);
19413 rack->fast_rsm_hack = 1;
19415 rack->fast_rsm_hack = 0;
19417 case TCP_RACK_PKT_DELAY:
19418 /* RACK added ms i.e. rack-rtt + reord + N */
19419 RACK_OPTS_INC(tcp_rack_pkt_delay);
19420 rack->r_ctl.rc_pkt_delay = optval;
19423 RACK_OPTS_INC(tcp_rack_delayed_ack);
19425 tp->t_delayed_ack = 0;
19427 tp->t_delayed_ack = 1;
19428 if (tp->t_flags & TF_DELACK) {
19429 tp->t_flags &= ~TF_DELACK;
19430 tp->t_flags |= TF_ACKNOW;
19431 NET_EPOCH_ENTER(et);
19433 NET_EPOCH_EXIT(et);
19437 case TCP_BBR_RACK_RTT_USE:
19438 RACK_OPTS_INC(tcp_rack_rtt_use);
19439 if ((optval != USE_RTT_HIGH) &&
19440 (optval != USE_RTT_LOW) &&
19441 (optval != USE_RTT_AVG))
19444 rack->r_ctl.rc_rate_sample_method = optval;
19446 case TCP_DATA_AFTER_CLOSE:
19447 RACK_OPTS_INC(tcp_data_after_close);
19449 rack->rc_allow_data_af_clo = 1;
19451 rack->rc_allow_data_af_clo = 0;
19456 #ifdef NETFLIX_STATS
19457 tcp_log_socket_option(tp, sopt_name, optval, error);
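/*
 * Replay options that were queued while the connection was deferring
 * them (TCP_DEFER_OPTIONS with no goodput measurement yet). Deferral
 * trades away the per-option error return, as noted below.
 */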
19464 rack_apply_deferred_options(struct tcp_rack *rack)
19466 struct deferred_opt_list *dol, *sdol;
19469 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) {
19470 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
		/* The disadvantage of deferral is that you lose the error return */
19472 s_optval = (uint32_t)dol->optval;
19473 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval);
19474 free(dol, M_TCPDO);
19479 rack_hw_tls_change(struct tcpcb *tp, int chg)
	 * HW TLS state has changed; fix all
19485 struct tcp_rack *rack;
19486 struct rack_sendmap *rsm;
19488 rack = (struct tcp_rack *)tp->t_fb_ptr;
19489 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
19496 rack->r_ctl.fsb.hw_tls = 1;
19498 rack->r_ctl.fsb.hw_tls = 0;
19502 rack_pru_options(struct tcpcb *tp, int flags)
19504 if (flags & PRUS_OOB)
19505 return (EOPNOTSUPP);
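/*
 * The function block below wires rack into the pluggable TCP stack
 * framework; tcp_addrack() registers it at module load under both the
 * stack name and its alias.
 */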
19509 static struct tcp_function_block __tcp_rack = {
19510 .tfb_tcp_block_name = __XSTRING(STACKNAME),
19511 .tfb_tcp_output = rack_output,
19512 .tfb_do_queued_segments = ctf_do_queued_segments,
19513 .tfb_do_segment_nounlock = rack_do_segment_nounlock,
19514 .tfb_tcp_do_segment = rack_do_segment,
19515 .tfb_tcp_ctloutput = rack_ctloutput,
19516 .tfb_tcp_fb_init = rack_init,
19517 .tfb_tcp_fb_fini = rack_fini,
19518 .tfb_tcp_timer_stop_all = rack_stopall,
19519 .tfb_tcp_timer_activate = rack_timer_activate,
19520 .tfb_tcp_timer_active = rack_timer_active,
19521 .tfb_tcp_timer_stop = rack_timer_stop,
19522 .tfb_tcp_rexmit_tmr = rack_remxt_tmr,
19523 .tfb_tcp_handoff_ok = rack_handoff_ok,
19524 .tfb_tcp_mtu_chg = rack_mtu_change,
19525 .tfb_pru_options = rack_pru_options,
19526 .tfb_hwtls_change = rack_hw_tls_change,
19530 * rack_ctloutput() must drop the inpcb lock before performing copyin on
19531 * socket option arguments. When it re-acquires the lock after the copy, it
 * has to revalidate that the connection is still valid for the socket
 * option.
19536 rack_set_sockopt(struct socket *so, struct sockopt *sopt,
19537 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
19540 int32_t error = 0, optval;
19542 switch (sopt->sopt_name) {
19543 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */
19544 /* Pacing related ones */
19545 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */
19546 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */
19547 case TCP_BBR_IWINTSO: /* URL:tso_iwin */
19548 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */
19549 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */
19550 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */
	case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss */
19552 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */
19553 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */
19554 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */
19555 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */
19556 case TCP_RACK_RR_CONF: /* URL:rrr_conf */
19557 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */
	case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */
	case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */
19560 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */
19561 /* End pacing related */
19562 case TCP_FAST_RSM_HACK: /* URL:frsm_hack */
19563 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */
19564 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */
19565 case TCP_RACK_MIN_TO: /* URL:min_to */
19566 case TCP_RACK_EARLY_SEG: /* URL:early_seg */
19567 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */
19568 case TCP_RACK_REORD_FADE: /* URL:reord_fade */
19569 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */
19570 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */
19571 case TCP_RACK_TLP_USE: /* URL:tlp_use */
19572 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */
19573 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */
19574 case TCP_RACK_DO_DETECTION: /* URL:detect */
19575 case TCP_NO_PRR: /* URL:noprr */
19576 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */
19577 case TCP_DATA_AFTER_CLOSE: /* no URL */
19578 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */
19579 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */
19580 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */
19581 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */
19582 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */
19583 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */
19584 case TCP_RACK_PROFILE: /* URL:profile */
19585 case TCP_USE_CMP_ACKS: /* URL:cmpack */
19586 case TCP_RACK_ABC_VAL: /* URL:labc */
19587 case TCP_REC_ABC_VAL: /* URL:reclabc */
19588 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */
19589 case TCP_DEFER_OPTIONS: /* URL:defer */
19590 case TCP_RACK_PACING_BETA: /* URL:pacing_beta */
19591 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */
19592 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */
19595 /* Filter off all unknown options to the base stack */
19596 return (tcp_default_ctloutput(so, sopt, inp, tp));
19600 if (sopt->sopt_name == TCP_PACING_RATE_CAP) {
19601 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval));
		 * We truncate it down to 32 bits for the socket-option
		 * trace; this means rates > 34Gbps won't show right, but
		 * that's probably ok.
19606 optval = (uint32_t)loptval;
19608 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
19609 /* Save it in 64 bit form too */
19615 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
19617 return (ECONNRESET);
19619 if (tp->t_fb != &__tcp_rack) {
19621 return (ENOPROTOOPT);
19623 if (rack->defer_options && (rack->gp_ready == 0) &&
19624 (sopt->sopt_name != TCP_DEFER_OPTIONS) &&
19625 (sopt->sopt_name != TCP_RACK_PACING_BETA) &&
19626 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) &&
19627 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) {
		/* Options are being deferred */
19629 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) {
19633 /* No memory to defer, fail */
19638 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval);
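/*
 * Usage sketch (illustrative, not part of the stack): once a socket has
 * been moved onto rack -- e.g. with the TCP_FUNCTION_BLK socket option --
 * the options handled above are plain IPPROTO_TCP socket options:
 *
 *	int one = 1;
 *	(void)setsockopt(fd, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS,
 *	    &one, sizeof(one));
 *
 * TCP_PACING_RATE_CAP is the lone option that takes a 64-bit value.
 */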
19644 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti)
19647 INP_WLOCK_ASSERT(tp->t_inpcb);
19648 bzero(ti, sizeof(*ti));
19650 ti->tcpi_state = tp->t_state;
19651 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
19652 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
19653 if (tp->t_flags & TF_SACK_PERMIT)
19654 ti->tcpi_options |= TCPI_OPT_SACK;
19655 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
19656 ti->tcpi_options |= TCPI_OPT_WSCALE;
19657 ti->tcpi_snd_wscale = tp->snd_scale;
19658 ti->tcpi_rcv_wscale = tp->rcv_scale;
19660 if (tp->t_flags2 & TF2_ECN_PERMIT)
19661 ti->tcpi_options |= TCPI_OPT_ECN;
19662 if (tp->t_flags & TF_FASTOPEN)
19663 ti->tcpi_options |= TCPI_OPT_TFO;
	/* t_rcvtime is still kept in ticks */
19665 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
	/* Since we hold everything in precise useconds, this is easy */
19667 ti->tcpi_rtt = tp->t_srtt;
19668 ti->tcpi_rttvar = tp->t_rttvar;
19669 ti->tcpi_rto = tp->t_rxtcur;
19670 ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
19671 ti->tcpi_snd_cwnd = tp->snd_cwnd;
19673 * FreeBSD-specific extension fields for tcp_info.
19675 ti->tcpi_rcv_space = tp->rcv_wnd;
19676 ti->tcpi_rcv_nxt = tp->rcv_nxt;
19677 ti->tcpi_snd_wnd = tp->snd_wnd;
19678 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. */
19679 ti->tcpi_snd_nxt = tp->snd_nxt;
19680 ti->tcpi_snd_mss = tp->t_maxseg;
19681 ti->tcpi_rcv_mss = tp->t_maxseg;
19682 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
19683 ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
19684 ti->tcpi_snd_zerowin = tp->t_sndzerowin;
19685 #ifdef NETFLIX_STATS
19686 ti->tcpi_total_tlp = tp->t_sndtlppack;
19687 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
19688 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
19691 if (tp->t_flags & TF_TOE) {
19692 ti->tcpi_options |= TCPI_OPT_TOE;
19693 tcp_offload_tcp_info(tp, ti);
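/*
 * Usage sketch (illustrative): the snapshot built by rack_fill_info()
 * is consumed through the standard TCP_INFO socket option:
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("srtt: %u us\n", ti.tcpi_rtt);
 *
 * No tick conversion is needed on the rtt fields since rack keeps its
 * srtt/rttvar in microseconds internally.
 */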
19699 rack_get_sockopt(struct socket *so, struct sockopt *sopt,
19700 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
19702 int32_t error, optval;
19703 uint64_t val, loptval;
19704 struct tcp_info ti;
	 * Because all our options are either boolean or an int, we can just
	 * pull everything into optval and then unlock and copy. If we ever
	 * add an option that is not an int, it will have quite an impact
	 * on this routine.
19712 switch (sopt->sopt_name) {
19714 /* First get the info filled */
19715 rack_fill_info(tp, &ti);
19716 /* Fix up the rtt related fields if needed */
19718 error = sooptcopyout(sopt, &ti, sizeof ti);
	 * Beta is the congestion control value for NewReno that influences how
	 * much of a backoff happens when loss is detected. It is normally set
	 * to 50 for 50%, i.e. the cwnd is reduced to 50% of its previous value
	 * when you exit recovery.
19726 case TCP_RACK_PACING_BETA:
19727 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
19729 else if (rack->rc_pacing_cc_set == 0)
19730 optval = rack->r_ctl.rc_saved_beta.beta;
19733 * Reach out into the CC data and report back what
19734 * I have previously set. Yeah it looks hackish but
19735 * we don't want to report the saved values.
19737 if (tp->ccv->cc_data)
19738 optval = ((struct newreno *)tp->ccv->cc_data)->beta;
	 * Beta_ecn is the congestion control value for NewReno that influences
	 * how much of a backoff happens when an ECN mark is detected. It is
	 * normally set to 80 for 80%, i.e. the cwnd is reduced by 20% of its
	 * previous value when you exit recovery. Note that classic ECN has a
	 * beta of 50; it is only ABE ECN that uses this "less" value, but we
	 * do too with pacing :)
19751 case TCP_RACK_PACING_BETA_ECN:
19752 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
19754 else if (rack->rc_pacing_cc_set == 0)
19755 optval = rack->r_ctl.rc_saved_beta.beta_ecn;
19758 * Reach out into the CC data and report back what
19759 * I have previously set. Yeah it looks hackish but
19760 * we don't want to report the saved values.
19762 if (tp->ccv->cc_data)
19763 optval = ((struct newreno *)tp->ccv->cc_data)->beta_ecn;
19768 case TCP_FAST_RSM_HACK:
19769 optval = rack->fast_rsm_hack;
19771 case TCP_DEFER_OPTIONS:
19772 optval = rack->defer_options;
19774 case TCP_RACK_MEASURE_CNT:
19775 optval = rack->r_ctl.req_measurements;
19777 case TCP_REC_ABC_VAL:
19778 optval = rack->r_use_labc_for_rec;
19780 case TCP_RACK_ABC_VAL:
19781 optval = rack->rc_labc;
19783 case TCP_HDWR_UP_ONLY:
		optval = rack->r_up_only;
19786 case TCP_PACING_RATE_CAP:
19787 loptval = rack->r_ctl.bw_rate_cap;
19789 case TCP_RACK_PROFILE:
		/* You cannot retrieve a profile; it's write only */
19793 case TCP_USE_CMP_ACKS:
19794 optval = rack->r_use_cmp_ack;
19796 case TCP_RACK_PACE_TO_FILL:
19797 optval = rack->rc_pace_to_cwnd;
19798 if (optval && rack->r_fill_less_agg)
19801 case TCP_RACK_NO_PUSH_AT_MAX:
19802 optval = rack->r_ctl.rc_no_push_at_mrtt;
19804 case TCP_SHARED_CWND_ENABLE:
19805 optval = rack->rack_enable_scwnd;
19807 case TCP_RACK_NONRXT_CFG_RATE:
19808 optval = rack->rack_rec_nonrxt_use_cr;
19811 if (rack->rack_no_prr == 1)
19813 else if (rack->no_prr_addback == 1)
19818 case TCP_RACK_DO_DETECTION:
19819 optval = rack->do_detection;
19821 case TCP_RACK_MBUF_QUEUE:
19822 /* Now do we use the LRO mbuf-queue feature */
19823 optval = rack->r_mbuf_queue;
19825 case TCP_TIMELY_DYN_ADJ:
19826 optval = rack->rc_gp_dyn_mul;
19828 case TCP_BBR_IWINTSO:
19829 optval = rack->rc_init_win;
19831 case TCP_RACK_TLP_REDUCE:
19832 /* RACK TLP cwnd reduction (bool) */
19833 optval = rack->r_ctl.rc_tlp_cwnd_reduce;
19835 case TCP_BBR_RACK_INIT_RATE:
		val = rack->r_ctl.init_rate;
		/* Convert to kbits per second */
		val *= 8;
		val /= 1000;
		optval = (uint32_t)val;
19842 case TCP_RACK_FORCE_MSEG:
19843 optval = rack->rc_force_max_seg;
19845 case TCP_RACK_PACE_MAX_SEG:
19846 /* Max segments in a pace */
19847 optval = rack->rc_user_set_max_segs;
19849 case TCP_RACK_PACE_ALWAYS:
19850 /* Use the always pace method */
19851 optval = rack->rc_always_pace;
19853 case TCP_RACK_PRR_SENDALOT:
19854 /* Allow PRR to send more than one seg */
19855 optval = rack->r_ctl.rc_prr_sendalot;
19857 case TCP_RACK_MIN_TO:
19858 /* Minimum time between rack t-o's in ms */
19859 optval = rack->r_ctl.rc_min_to;
19861 case TCP_RACK_EARLY_SEG:
19862 /* If early recovery max segments */
19863 optval = rack->r_ctl.rc_early_recovery_segs;
19865 case TCP_RACK_REORD_THRESH:
19866 /* RACK reorder threshold (shift amount) */
19867 optval = rack->r_ctl.rc_reorder_shift;
19869 case TCP_RACK_REORD_FADE:
19870 /* Does reordering fade after ms time */
19871 optval = rack->r_ctl.rc_reorder_fade;
19873 case TCP_BBR_USE_RACK_RR:
19874 /* Do we use the rack cheat for rxt */
19875 optval = rack->use_rack_rr;
19877 case TCP_RACK_RR_CONF:
19878 optval = rack->r_rr_config;
19880 case TCP_HDWR_RATE_CAP:
19881 optval = rack->r_rack_hw_rate_caps;
19883 case TCP_BBR_HDWR_PACE:
19884 optval = rack->rack_hdw_pace_ena;
19886 case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold, i.e. srtt + (srtt/N) */
19888 optval = rack->r_ctl.rc_tlp_threshold;
19890 case TCP_RACK_PKT_DELAY:
19891 /* RACK added ms i.e. rack-rtt + reord + N */
19892 optval = rack->r_ctl.rc_pkt_delay;
19894 case TCP_RACK_TLP_USE:
19895 optval = rack->rack_tlp_threshold_use;
19897 case TCP_RACK_PACE_RATE_CA:
19898 optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
19900 case TCP_RACK_PACE_RATE_SS:
19901 optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
19903 case TCP_RACK_PACE_RATE_REC:
19904 optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
	case TCP_RACK_GP_INCREASE_SS:
		optval = rack->r_ctl.rack_per_of_gp_ss;
	case TCP_RACK_GP_INCREASE_CA:
		optval = rack->r_ctl.rack_per_of_gp_ca;
19912 case TCP_BBR_RACK_RTT_USE:
19913 optval = rack->r_ctl.rc_rate_sample_method;
19916 optval = tp->t_delayed_ack;
19918 case TCP_DATA_AFTER_CLOSE:
19919 optval = rack->rc_allow_data_af_clo;
19921 case TCP_SHARED_CWND_TIME_LIMIT:
19922 optval = rack->r_limit_scw;
19924 case TCP_RACK_TIMER_SLOP:
19925 optval = rack->r_ctl.timer_slop;
19928 return (tcp_default_ctloutput(so, sopt, inp, tp));
	if (sopt->sopt_name == TCP_PACING_RATE_CAP)
19934 error = sooptcopyout(sopt, &loptval, sizeof loptval);
19936 error = sooptcopyout(sopt, &optval, sizeof optval);
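/*
 * Common socket-option entry point: dispatch to the set or get handler
 * above based on the direction recorded in the sockopt.
 */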
19942 rack_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp)
19944 int32_t error = EINVAL;
19945 struct tcp_rack *rack;
19947 rack = (struct tcp_rack *)tp->t_fb_ptr;
19948 if (rack == NULL) {
19952 if (sopt->sopt_dir == SOPT_SET) {
19953 return (rack_set_sockopt(so, sopt, inp, tp, rack));
19954 } else if (sopt->sopt_dir == SOPT_GET) {
19955 return (rack_get_sockopt(so, sopt, inp, tp, rack));
19962 static const char *rack_stack_names[] = {
19963 __XSTRING(STACKNAME),
19965 __XSTRING(STACKALIAS),
19970 rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
19972 memset(mem, 0, size);
19977 rack_dtor(void *mem, int32_t size, void *arg)
19982 static bool rack_mod_inited = false;
19985 tcp_addrack(module_t mod, int32_t type, void *data)
19992 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
19993 sizeof(struct rack_sendmap),
19994 rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
19996 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
19997 sizeof(struct tcp_rack),
19998 rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
20000 sysctl_ctx_init(&rack_sysctl_ctx);
20001 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
20002 SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
20005 __XSTRING(STACKALIAS),
20007 __XSTRING(STACKNAME),
20009 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
20011 if (rack_sysctl_root == NULL) {
20012 printf("Failed to add sysctl node\n");
20016 rack_init_sysctls();
20017 num_stacks = nitems(rack_stack_names);
20018 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
20019 rack_stack_names, &num_stacks);
20021 printf("Failed to register %s stack name for "
20022 "%s module\n", rack_stack_names[num_stacks],
20023 __XSTRING(MODNAME));
20024 sysctl_ctx_free(&rack_sysctl_ctx);
20026 uma_zdestroy(rack_zone);
20027 uma_zdestroy(rack_pcb_zone);
20028 rack_counter_destroy();
20029 printf("Failed to register rack module -- err:%d\n", err);
20032 tcp_lro_reg_mbufq();
20033 rack_mod_inited = true;
20036 err = deregister_tcp_functions(&__tcp_rack, true, false);
20039 err = deregister_tcp_functions(&__tcp_rack, false, true);
20042 if (rack_mod_inited) {
20043 uma_zdestroy(rack_zone);
20044 uma_zdestroy(rack_pcb_zone);
20045 sysctl_ctx_free(&rack_sysctl_ctx);
20046 rack_counter_destroy();
20047 rack_mod_inited = false;
20049 tcp_lro_dereg_mbufq();
20053 return (EOPNOTSUPP);
20058 static moduledata_t tcp_rack = {
20059 .name = __XSTRING(MODNAME),
20060 .evhand = tcp_addrack,
20064 MODULE_VERSION(MODNAME, 1);
20065 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
20066 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
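/*
 * Usage sketch (illustrative): on a kernel with this module available,
 * the stack is typically loaded and made the default with:
 *
 *	kldload tcp_rack
 *	sysctl net.inet.tcp.functions_default=rack
 *
 * after which new connections are handled by rack.
 */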